#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

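/*
 * Some K8 MSRs are only accessible when EDI contains the magic value
 * 0x9c5a203a, so these helpers go through rdmsr_safe_regs() and
 * wrmsr_safe_regs() with a full GPR array instead of plain rdmsr/wrmsr.
 */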
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
	u32 gprs[8] = { 0 };

	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication #21266).
 *
 * AMD did not bump the chip revision when fixing the bug, but the fix
 * also sped up indirect calls, which is what the timing loop in
 * init_amd_k6() detects.
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc)	/* Configuration Address Register */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}
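/*
 * K6 setup: work around the stepping-B bug described above and turn on
 * memory write allocation in WHCR if the BIOS left it disabled.
 */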
static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the bug and improved indirect
		 * calls at the same time, so time a million calls through
		 * vide() to tell fixed chips from buggy ones.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips: new style WHCR, write
		   allocation works for up to 4092Mb */
		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

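/*
 * Only certain Athlon/Duron steppings (or parts with the MP capability
 * bit) are certified for SMP operation; warn and taint the kernel for
 * any other combination.
 */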
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
	 * capability bit. It's worth noting that the A5 stepping (662)
	 * of some Athlon XP's have the MP bit set.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);

valid_k7:
	;
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map);
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

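/* The last-level-cache id doubles as the northbridge/node id on AMD. */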
u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID and NUMA node.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];

		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute from the core count */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

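/*
 * One-time setup run on the boot CPU: warn if firmware left the TSC not
 * counting at P0 frequency, and, on family 0x15, derive the mmap
 * virtual-address alignment from the L1 instruction cache geometry to
 * avoid cache aliasing penalties between the cores of a compute unit.
 */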
static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			/* HWCR bit 24: TscFreqSel */
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		/* CPUID 0x80000005 EDX: L1 instruction cache size/assoc */
		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;
	unsigned long long value;

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &value)) {
				value &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, value);
			}
		}

	}
	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else
	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */
	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6:
		init_amd_k7(c);
		break;
	}

	/* K6s reports MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish Models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86 == 0x15) &&
	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (!rdmsrl_safe(0xc0011005, &value)) {
			value |= 1ULL << 54;
			wrmsrl_safe(0xc0011005, value);
			rdmsrl(0xc0011005, value);
			if (value & (1ULL << 54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
					"disabled Topology Extensions Support\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86 == 0x15) &&
	    (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {

		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (c->x86 == 0x10) {
		/*
		 * Disable GART TLB Walk Errors on Fam10h. We do this here
		 * because this is always needed when GART is enabled, even in
		 * a kernel which has no MCE support built in.
		 * BIOS should disable GartTlbWlk Errors itself. If it
		 * doesn't, do it here as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
		}

		/*
		 * On family 10h BIOS may not have properly enabled WC+
		 * support, causing it to be converted to CD memtype. This may
		 * result in performance degradation for certain nested-paging
		 * guests. Prevent this conversion by clearing bit 24 in
		 * MSR_AMD64_BU_CFG2.
		 *
		 * NOTE: we want to use the _safe accessors so as not to #GP
		 * kvm guests on older kvm hosts.
		 */
		rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
		value &= ~(1ULL << 24);
		wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
	}

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

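/*
 * tlb_flushall_shift sets the threshold at which flush_tlb_mm_range()
 * stops flushing individual pages and flushes the whole TLB instead.
 */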
static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
	tlb_flushall_shift = 5;

	if (c->x86 <= 0x11)
		tlb_flushall_shift = 4;
}

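/*
 * Read TLB entry counts from CPUID leaf 0x80000006 (falling back to the
 * L1 leaf 0x80000005 where the L2 numbers are unavailable) into the
 * tlb_ll* tables used by the generic TLB flush code.
 */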
static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask)) {
		u32 a, b, c, d;

		cpuid(0x80000005, &a, &b, &c, &d);
		tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff;
	} else {
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
	}

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;

	cpu_set_tlb_flushall_shift(c);
}

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);