#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

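/*
 * K8 has a few "password"-protected MSRs: the access only succeeds when
 * EDI holds the magic value 0x9c5a203a.  The two helpers below therefore
 * go through {rd,wr}msr_safe_regs() so that the whole GPR set, including
 * EDI, can be handed to the MSR access.
 */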
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 * B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for an upgrade.
 */
extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

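/*
 * K6 setup: run the timing loop below to detect the stepping-B erratum,
 * then program WHCR so write allocation is enabled for as much of low
 * memory as the respective WHCR format can describe.
 */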
static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		/* We can only write allocate on the low 4092Mb */
		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

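/*
 * K7 setup: expose SSE where only the BIOS had it disabled, retune
 * CLK_CTL as per AMD technical note 27212 0.2, and taint the kernel if
 * this is an SMP setup of Athlons that were never certified for MP.
 */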
static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.2001-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map);
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes_per_socket > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes_per_socket;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

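/*
 * Report the northbridge/node id that amd_detect_cmp() and
 * amd_get_topology() cached in cpu_llc_id above; on !SMP kernels there
 * is only ever node 0.
 */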
u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  Since
		 * going from APIC ID to BIOS HT node ID is basically
		 * undefined behavior, falling back to the path for the
		 * previous case is always safe.
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

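/*
 * One-time setup run on the boot CPU only: split the direct mapping
 * around the TSEG SMM area, sanity-check that the TSC counts at P0
 * frequency, and seed the Fam15h shared-I$ mmap randomization mask.
 */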
static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid = cpuid_edx(0x80000005);
		assoc = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask = (upperbit - 1) & PAGE_MASK;
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x80, so bail out here.
	 */
	if (cpu_has_apic && c->x86 > 0x16) {
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	} else if (cpu_has_apic && c->x86 >= 0xf) {
		/* check CPU config space for extended APIC ID */
		unsigned int val;

		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, enable it here.
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this
	 * conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}

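/*
 * Common AMD init: runs for every AMD CPU and dispatches to the
 * family-specific init_amd_*() helpers above via the switch below.
 */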
static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s reports MCEs but don't actually have a MCE unit */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET */
	set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

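/*
 * CPUID 0x80000006 reports the L2 TLBs: EBX[27:16]/EBX[11:0] hold the
 * 4K data/instruction entry counts and EAX the same for 2M/4M pages,
 * 12 bits each (only 8 bits on Fam 0xf, which is special-cased below).
 */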
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
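
/*
 * Worked example: AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) packs
 * family 0xf with the model/stepping interval [0x412, 0xfff], since a
 * model/stepping pair is compared as ms = (model << 4) | stepping.
 */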

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

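/*
 * OSVW (OS Visible Workaround) is consulted first when the erratum has
 * an OSVW id: MSR_AMD64_OSVW_ID_LENGTH bounds the valid status bits, and
 * the matching bit in the OSVW_STATUS MSRs reports whether the erratum
 * applies. Otherwise fall back to family/model/stepping range matching.
 */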
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

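/*
 * Program a hardware breakpoint address mask. DR0 has a dedicated MSR,
 * while the DR1-DR3 mask MSRs are consecutive starting at
 * MSR_F16H_DR1_ADDR_MASK, hence the "- 1 + dr" arithmetic below.
 */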
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!cpu_has_bpext)
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}