#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
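
/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */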
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        u32 gprs[8] = { 0 };
        int err;

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[1] = msr;
        gprs[7] = 0x9c5a203a;   /* K8 magic key required in %edi for MSR access */

        err = rdmsr_safe_regs(gprs);

        *p = gprs[0] | ((u64)gprs[2] << 32);

        return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
        u32 gprs[8] = { 0 };

        WARN_ONCE((boot_cpu_data.x86 != 0xf),
                  "%s should only be used on K8!\n", __func__);

        gprs[0] = (u32)val;
        gprs[1] = msr;
        gprs[2] = val >> 32;
        gprs[7] = 0x9c5a203a;   /* K8 magic key required in %edi for MSR access */

        return wrmsr_safe_regs(gprs);
}
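
/*
 * B-step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See http://www.multimania.com/poulot/k6bug.html and
 * section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266 Issue date: November 1997)
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */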
#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
        ".globl vide\n"
        ".type vide, @function\n"
        ".align 4\n"
        "vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
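/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */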
#define CBAR            (0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB        (0x80000000)
#define CBAR_KEY        (0X000000CB)
        if (c->x86_model == 9 || c->x86_model == 10) {
                if (inl(CBAR) & CBAR_ENB)
                        outl(0 | CBAR_KEY, CBAR);
        }
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
        int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

        if (c->x86_model < 6) {
                /* Based on AMD doc 20734R - June 2000 */
                if (c->x86_model == 0) {
                        clear_cpu_cap(c, X86_FEATURE_APIC);
                        set_cpu_cap(c, X86_FEATURE_PGE);
                }
                return;
        }

        if (c->x86_model == 6 && c->x86_stepping == 1) {
                const int K6_BUG_LOOP = 1000000;
                int n;
                void (*f_vide)(void);
                u64 d, d2;

                pr_info("AMD K6 stepping B detected - ");
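
                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
                 * calls at the same time.
                 */
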
                n = K6_BUG_LOOP;
                f_vide = vide;
                OPTIMIZER_HIDE_VAR(f_vide);
                d = rdtsc();
                while (n--)
                        f_vide();
                d2 = rdtsc();
                d = d2-d;

                if (d > 20*K6_BUG_LOOP)
                        pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
                        pr_cont("probably OK (after B9730xxxx).\n");
        }

        /* K6 with old style WHCR */
        if (c->x86_model < 8 ||
           (c->x86_model == 8 && c->x86_stepping < 8)) {
                /* We can only write allocate on the low 508Mb */
                if (mbytes > 508)
                        mbytes = 508;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0x0000FFFF) == 0) {
                        unsigned long flags;
                        l = (1<<0)|((mbytes/4)<<1);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
        }

        if ((c->x86_model == 8 && c->x86_stepping > 7) ||
             c->x86_model == 9 || c->x86_model == 13) {
                /* The more serious chips .. */

                if (mbytes > 4092)
                        mbytes = 4092;

                rdmsr(MSR_K6_WHCR, l, h);
                if ((l&0xFFFF0000) == 0) {
                        unsigned long flags;
                        l = ((mbytes>>2)<<22)|(1<<16);
                        local_irq_save(flags);
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
                        pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }

                return;
        }

        if (c->x86_model == 10) {
                /* AMD Geode LX is model 10 */
                /* placeholder for any needed mods */
                return;
        }
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        u32 l, h;
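
        /*
         * Bit 15 of Athlon specific MSR 15, needs to be 0
         * to enable SSE on Palomino/Morgan/Barton CPU's.
         * If the BIOS didn't enable it already, enable it here.
         */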
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
                        pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
        }
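
        /*
         * It's been determined by AMD that Athlons since model 8 stepping 1
         * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
         * As per AMD technical note 27212 0.2
         */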
        if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
                        pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
                                l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }

        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        /* Athlon 660/661 is valid. */
        if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
            (c->x86_stepping == 1)))
                return;

        /* Duron 670 is valid */
        if ((c->x86_model == 7) && (c->x86_stepping == 0))
                return;

        /*
         * Athlon 662, Duron 671, and Athlon >model 7 have capability
         * bit. It's worth noting that the A5 stepping (662) of some
         * Athlon XP's have the MP bit set.
         * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
         * more.
         */
        if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
            ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
             (c->x86_model > 7))
                if (cpu_has(c, X86_FEATURE_MP))
                        return;

        /* If we get here, not a certified SMP capable AMD system. */

        /*
         * Don't taint if we are running SMP kernel on a single non-MP
         * approved Athlon
         */
        WARN_ONCE(1, "WARNING: This combination of AMD"
                " processors is not suitable for SMP.\n");
        add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
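/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */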
static int nearby_node(int apicid)
{
        int i, node;

        for (i = apicid - 1; i >= 0; i--) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                node = __apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map);
}
#endif
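
/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */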
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
        u32 cus_per_node;

        if (c->x86 >= 0x17)
                return;

        cus_per_node = c->x86_max_cores / nodes_per_socket;
        c->cpu_core_id %= cus_per_node;
}
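
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */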
static void amd_get_topology(struct cpuinfo_x86 *c)
{
        u8 node_id;
        int cpu = smp_processor_id();

        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                int err;
                u32 eax, ebx, ecx, edx;

                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

                node_id = ecx & 0xff;

                if (c->x86 == 0x15)
                        c->cu_id = ebx & 0xff;

                if (c->x86 >= 0x17) {
                        c->cpu_core_id = ebx & 0xff;

                        if (smp_num_siblings > 1)
                                c->x86_max_cores /= smp_num_siblings;
                }

                /*
                 * In case leaf B is available, use it to derive
                 * topology information.
                 */
                err = detect_extended_topology(c);
                if (!err)
                        c->x86_coreid_bits = get_count_order(c->x86_max_cores);

                cacheinfo_amd_init_llc_id(c, cpu, node_id);

        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                node_id = value & 7;

                per_cpu(cpu_llc_id, cpu) = node_id;
        } else
                return;

        if (nodes_per_socket > 1) {
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
                legacy_fixup_core_id(c);
        }
}
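
/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
 * Assumes number of cores is a power of two.
 */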
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
        unsigned bits;
        int cpu = smp_processor_id();

        bits = c->x86_coreid_bits;
        /* Low order bits define the core id (index of core in socket) */
        c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
        /* Convert the initial APIC ID into the socket ID */
        c->phys_proc_id = c->initial_apicid >> bits;
        /* use socket ID also for last level cache */
        per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
}

static void amd_detect_ppin(struct cpuinfo_x86 *c)
{
        unsigned long long val;

        if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
                return;

        /* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
        if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
                goto clear_ppin;

        /* PPIN is locked in disabled mode */
        if ((val & 3UL) == 1UL)
                goto clear_ppin;

        /* If PPIN is disabled, but not locked, try to enable it */
        if (!(val & 2UL)) {
                wrmsrl_safe(MSR_AMD_PPIN_CTL, val | 2UL);
                rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
        }

        /* If PPIN_EN bit is 1, return from here; otherwise fall through */
        if (val & 2UL)
                return;

clear_ppin:
        clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
}

u16 amd_get_nb_id(int cpu)
{
        return per_cpu(cpu_llc_id, cpu);
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
        return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        int cpu = smp_processor_id();
        int node;
        unsigned apicid = c->apicid;

        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE)
                node = per_cpu(cpu_llc_id, cpu);
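
        /*
         * On multi-fabric platform (e.g. Numascale NumaChip) a
         * platform-specific handler needs to be called to fixup some
         * IDs of the CPU.
         */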
        if (x86_cpuinit.fixup_cpu_id)
                x86_cpuinit.fixup_cpu_id(c, node);

        if (!node_online(node)) {
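                /*
                 * Two possibilities here:
                 *
                 * - The CPU is missing memory and no node was created. In
                 *   that case try picking one from a nearby CPU.
                 *
                 * - The APIC IDs differ from the HyperTransport node IDs
                 *   which the K8 northbridge parsing fills in. Assume
                 *   they are all increased by a constant offset, but in
                 *   the same order as the HT nodeids. If that doesn't
                 *   result in a usable node fall back to the path for the
                 *   previous case.
                 */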
                int ht_nodeid = c->initial_apicid;

                if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = __apicid_to_node[ht_nodeid];

                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        unsigned bits, ecx;

        /* Multi core CPU? */
        if (c->extended_cpuid_level < 0x80000008)
                return;

        ecx = cpuid_ecx(0x80000008);

        c->x86_max_cores = (ecx & 0xff) + 1;

        /* CPU telling us the core id bits shift? */
        bits = (ecx >> 12) & 0xF;

        /* Otherwise recompute */
        if (bits == 0) {
                while ((1 << bits) < c->x86_max_cores)
                        bits++;
        }

        c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        if (c->x86 >= 0xf) {
                unsigned long long tseg;
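
                /*
                 * Split up direct mapping around the TSEG SMM area.
                 * Don't do it for gbpages because there seems very little
                 * benefit in doing so.
                 */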
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        unsigned long pfn = tseg >> PAGE_SHIFT;

                        pr_debug("tseg: %010llx\n", tseg);
                        if (pfn_range_is_mapped(pfn, pfn + 1))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
        }
#endif

        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

                if (c->x86 > 0x10 ||
                    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
                        u64 val;

                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
                                pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }

        if (c->x86 == 0x15) {
                unsigned long upperbit;
                u32 cpuid, assoc;

                cpuid    = cpuid_edx(0x80000005);
                assoc    = cpuid >> 16 & 0xff;
                upperbit = ((cpuid >> 24) << 10) / assoc;

                va_align.mask  = (upperbit - 1) & PAGE_MASK;
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

                /* A random value per boot for bit slice [12:upper_bit) */
                va_align.bits = get_random_int() & va_align.mask;
        }

        if (cpu_has(c, X86_FEATURE_MWAITX))
                use_mwaitx_delay();

        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
                u32 ecx;

                ecx = cpuid_ecx(0x8000001e);
                nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;

                rdmsrl(MSR_FAM10H_NODE_ID, value);
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }

        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
            !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
            c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;

                switch (c->x86) {
                case 0x15: bit = 54; break;
                case 0x16: bit = 33; break;
                case 0x17: bit = 10; break;
                default: return;
                }

                /*
                 * Try to cache the base value so further operations can
                 * avoid RMW. If that faults, do not enable SSBD.
                 */
                if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
                        setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
                        setup_force_cpu_cap(X86_FEATURE_SSBD);
                        x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
                }
        }

        resctrl_cpu_detect(c);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
        u64 msr;
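
        /*
         * BIOS support is required for SME and SEV.
         *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
         *            the SME physical address space reduction value.
         *            If BIOS has not enabled SME then don't advertise the
         *            SME feature (set in scattered.c).
         *   For SEV: If BIOS has not enabled SEV then don't advertise the
         *            SEV and SEV_ES feature (set in scattered.c).
         *
         *   In all cases, since support for SME and SEV requires long mode,
         *   don't advertise the feature under CONFIG_X86_32.
         */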
        if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
                /* Check if memory encryption is enabled */
                rdmsrl(MSR_K8_SYSCFG, msr);
                if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
                        goto clear_all;

                /*
                 * Always adjust physical address bits. Even though this
                 * will be a value above 32-bits this is still done for
                 * CONFIG_X86_32 so that accurate values are reported.
                 */
                c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

                if (IS_ENABLED(CONFIG_X86_32))
                        goto clear_all;

                rdmsrl(MSR_K7_HWCR, msr);
                if (!(msr & MSR_K7_HWCR_SMMLOCK))
                        goto clear_sev;

                return;

clear_all:
                setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
                setup_clear_cpu_cap(X86_FEATURE_SEV);
                setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
        }
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
        u64 value;
        u32 dummy;

        early_init_amd_mc(c);

#ifdef CONFIG_X86_32
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_K7);
#endif

        if (c->x86 >= 0xf)
                set_cpu_cap(c, X86_FEATURE_K8);

        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
        if (c->x86_power & BIT(12))
                set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
        /* Set MTRR capability flag if appropriate */
        if (c->x86 == 5)
                if (c->x86_model == 13 || c->x86_model == 9 ||
                    (c->x86_model == 8 && c->x86_stepping >= 8))
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
        /*
         * ApicID can always be treated as an 8-bit value for AMD APIC versions
         * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
         * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
         * after 16h.
         */
        if (boot_cpu_has(X86_FEATURE_APIC)) {
                if (c->x86 > 0x16)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                else if (c->x86 >= 0xf) {
                        /* check CPU config space for extended APIC ID */
                        unsigned int val;

                        val = read_pci_config(0, 24, 0, 0x68);
                        if ((val >> 17 & 0x3) == 0x3)
                                set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
                }
        }
#endif

        /*
         * This is only needed to tell the kernel whether to use VMCALL
         * and VMMCALL.  VMMCALL is never executed except under virt, so
         * we can set it unconditionally.
         */
        set_cpu_cap(c, X86_FEATURE_VMMCALL);

        /* F16h erratum 793, CVE-2013-6885 */
        if (c->x86 == 0x16 && c->x86_model <= 0xf)
                msr_set_bit(MSR_AMD64_LS_CFG, 15);

        /*
         * Check whether the machine is affected by erratum 400. This is
         * used to select the proper idle routine and to enable the
         * corresponding MSR check later during boot, which sets the
         * X86_BUG_AMD_APIC_C1E bug.
         */
        if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_E400);

        early_detect_mem_encrypt(c);

        /* Re-enable TopologyExtensions if switched off by BIOS */
        if (c->x86 == 0x15 &&
            (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
            !cpu_has(c, X86_FEATURE_TOPOEXT)) {

                if (msr_set_bit(0xc0011005, 54) > 0) {
                        rdmsrl(0xc0011005, value);
                        if (value & BIT_64(54)) {
                                set_cpu_cap(c, X86_FEATURE_TOPOEXT);
                                pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
                        }
                }
        }

        if (cpu_has(c, X86_FEATURE_TOPOEXT))
                smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
        u32 level;
        u64 value;

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /*
         * Some BIOSes incorrectly force this feature, but only K8 revision D
         * (model = 0x14) and later actually support it.
         * (AMD Erratum #110, docId: 25759).
         */
        if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
                clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
                if (!rdmsrl_amd_safe(0xc001100d, &value)) {
                        value &= ~BIT_64(32);
                        wrmsrl_amd_safe(0xc001100d, value);
                }
        }

        if (!c->x86_model_id[0])
                strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        msr_set_bit(MSR_K7_HWCR, 6);
#endif
        set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
        /* do this for boot cpu */
        if (c == &boot_cpu_data)
                check_enable_amd_mmconf_dmi();

        fam10h_check_enable_mmcfg();
#endif

        /*
         * Disable GART TLB Walk Errors on Fam10h. We do this here because this
         * is always needed when GART is enabled, even in a kernel which has no
         * MCE support built in. BIOS should disable GartTlbWlk Errors already.
         * If it doesn't, enable the workaround.
         */
        msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

        /*
         * On family 10h BIOS may not have properly enabled WC+ support, causing
         * it to be converted to CD memtype. This may result in performance
         * degradation for certain nested-paging guests. Prevent this conversion
         * by clearing bit 24 in MSR_AMD64_BU_CFG2.
         */
        msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

        if (cpu_has_amd_erratum(c, amd_erratum_383))
                set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG        0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
        /*
         * Apply erratum 665 fix unconditionally so machines without a BIOS
         * fix work.
         */
        msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "force"))
                rdrand_force = true;
        else
                return -EINVAL;

        return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
        /*
         * Saving of the MSR used to hide the RDRAND support during
         * suspend/resume is done by arch/x86/power/cpu.c, which is
         * dependent on CONFIG_PM_SLEEP.
         */
        if (!IS_ENABLED(CONFIG_PM_SLEEP))
                return;

        /*
         * The nordrand option can clear X86_FEATURE_RDRAND, so check for
         * RDRAND support using the CPUID function directly.
         */
        if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
                return;

        msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

        /*
         * Verify that the CPUID change has occurred in case the kernel is
         * running virtualized and the hypervisor doesn't support the MSR.
         */
        if (cpuid_ecx(1) & BIT(30)) {
                pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
                return;
        }

        clear_cpu_cap(c, X86_FEATURE_RDRAND);
        pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
        u64 value;

        /*
         * The way access filter has a performance penalty on some workloads.
         * Disable it on the affected CPUs.
         */
        if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
                if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
                        value |= 0x1E;
                        wrmsrl_safe(MSR_F15H_IC_CFG, value);
                }
        }

        /*
         * Some BIOS implementations do not restore proper RDRAND support
         * across suspend and resume. Check on whether to hide the RDRAND
         * instruction support via CPUID.
         */
        clear_rdrand_cpuid_bit(c);
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
        set_cpu_cap(c, X86_FEATURE_ZEN);

#ifdef CONFIG_NUMA
        node_reclaim_distance = 32;
#endif

        /*
         * Fix erratum 1076: CPB feature bit not being set in CPUID.
         * Always set it, except when running under a hypervisor.
         */
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
                set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
        early_init_amd(c);

        /*
         * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
         * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
         */
        clear_cpu_cap(c, 0*32+31);

        if (c->x86 >= 0x10)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);

        /* get apicid instead of initial apic id from cpuid */
        c->apicid = hard_smp_processor_id();

        /* K6s reports MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);

        switch (c->x86) {
        case 4:    init_amd_k5(c); break;
        case 5:    init_amd_k6(c); break;
        case 6:    init_amd_k7(c); break;
        case 0xf:  init_amd_k8(c); break;
        case 0x10: init_amd_gh(c); break;
        case 0x12: init_amd_ln(c); break;
        case 0x15: init_amd_bd(c); break;
        case 0x16: init_amd_jg(c); break;
        case 0x17: fallthrough;
        case 0x19: init_amd_zn(c); break;
        }

        /*
         * Enable workaround for FXSAVE leak on CPUs
         * without a XSaveErPtr feature
         */
        if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
                set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

        cpu_detect_cache_sizes(c);

        amd_detect_cmp(c);
        amd_get_topology(c);
        srat_detect_node(c);
        amd_detect_ppin(c);

        init_amd_cacheinfo(c);

        if (cpu_has(c, X86_FEATURE_XMM2)) {
                /*
                 * Use LFENCE for execution serialization.  On families which
                 * don't have that MSR, LFENCE is already serializing.
                 * msr_set_bit() uses the safe accessors, too, even if the MSR
                 * is not present.
                 */
                msr_set_bit(MSR_F10H_DECFG,
                            MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);

                /* A serializing LFENCE stops RDTSC speculation */
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
        }

        /*
         * Family 0x12 and above processors have APIC timer
         * running in deep C states.
         */
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);

        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
                        set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

        /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
        if (!cpu_has(c, X86_FEATURE_XENPV))
                set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

        /*
         * Turn on the Instructions Retired free counter on machines not
         * susceptible to erratum #1054 "Instructions Retired Performance
         * Counter May Be Inaccurate".
         */
        if (cpu_has(c, X86_FEATURE_IRPERF) &&
            !cpu_has_amd_erratum(c, amd_erratum_1054))
                msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /* AMD errata T13 (order #21922) */
        if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
                /* Tbird rev A1/A2 */
                if (c->x86_model == 4 &&
                    (c->x86_stepping == 0 || c->x86_stepping == 1))
                        size = 256;
        }
        return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
        u32 ebx, eax, ecx, edx;
        u16 mask = 0xfff;

        if (c->x86 < 0xf)
                return;

        if (c->extended_cpuid_level < 0x80000006)
                return;

        cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

        tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
        tlb_lli_4k[ENTRIES] = ebx & mask;

        /*
         * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
         * characteristics from the CPUID function 0x80000005 instead.
         */
        if (c->x86 == 0xf) {
                cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                mask = 0xff;
        }

        /* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!((eax >> 16) & mask))
                tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
        else
                tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

        /* a 4M entry uses two 2M entries */
        tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

        /* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
        if (!(eax & mask)) {
                /* Erratum 658 */
                if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
                        tlb_lli_2m[ENTRIES] = 1024;
                } else {
                        cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
                        tlb_lli_2m[ENTRIES] = eax & 0xff;
                }
        } else
                tlb_lli_2m[ENTRIES] = eax & mask;

        tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [3]  = "486 DX/2",
                          [7]  = "486 DX/2-WB",
                          [8]  = "486 DX/4",
                          [9]  = "486 DX/4-WB",
                          [14] = "Am5x86-WT",
                          [15] = "Am5x86-WB"
                  }
                },
        },
        .legacy_cache_size = amd_size_cache,
#endif
        .c_early_init   = early_init_amd,
        .c_detect_tlb   = cpu_detect_tlb_amd,
        .c_bsp_init     = bsp_init_amd,
        .c_init         = init_amd,
        .c_x86_vendor   = X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
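
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */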
#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)

static const int amd_erratum_400[] =
        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
static const int amd_erratum_1054[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;

        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;

                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
                if (osvw_id < osvw_len) {
                        u64 osvw_bits;

                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
                               osvw_bits);
                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
                }
        }

        /* OSVW unavailable or ID unknown, match family-model-stepping range */
        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
        while ((range = *erratum++))
                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
                    (ms >= AMD_MODEL_RANGE_START(range)) &&
                    (ms <= AMD_MODEL_RANGE_END(range)))
                        return true;

        return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
        if (!boot_cpu_has(X86_FEATURE_BPEXT))
                return;

        switch (dr) {
        case 0:
                wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
                break;
        case 1:
        case 2:
        case 3:
                wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
                break;
        default:
                break;
        }
}