#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"
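
/*
 * rdmsrl_amd_safe()/wrmsrl_amd_safe() go through the
 * {rd,wr}msr_safe_regs() helpers so that all GPRs can be seeded:
 * gprs[] is laid out as { eax, ecx, edx, ebx, esp, ebp, esi, edi }.
 * The K8 MSRs accessed this way are only readable/writable while
 * %edi holds the magic constant 0x9c5a203a.
 */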

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
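
/*
 * B-step AMD K6s before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 * (Publication # 21266).
 *
 * vide() is the dummy indirect-call target used by the timing test in
 * init_amd_k6() below to tell fixed parts from buggy ones.
 */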
extern __visible void vide(void);
__asm__(".globl vide\n\t.align 4\nvide: ret");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
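/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */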
#define CBAR		(0xfffc)	/* Configuration Base Address */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)	/* unlock key */
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");
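
		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */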

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
			       "system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	    (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
			       mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
			       mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			       "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
			       l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XP's have the MP bit set.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map);
}
#endif
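
/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */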
#ifdef CONFIG_X86_HT
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has_topoext) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif
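
/*
 * On a AMD dual core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes number of cores is a power of two.
 */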
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);
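
	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */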
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
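		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 */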
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void bsp_init_amd(struct cpuinfo_x86 *c)
{

#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		/*
		 * On F15h the L1 instruction cache is shared by the two
		 * cores of a compute unit. Derive the way size from the
		 * L1I size and associativity reported by CPUID 0x80000005
		 * and align virtual address allocations to it, avoiding
		 * cache-aliasing penalties.
		 */
		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);
}

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif
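
	/*
	 * Disable GART TLB Walk Errors on Fam10h. The GART TLB Walk
	 * Error notification can surface as spurious machine check
	 * errors, so mask it out via bit 10 of the MC4 mask MSR.
	 */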
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
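
	/*
	 * On family 10h BIOS may not have properly enabled WC+ support,
	 * causing it to be converted to CD memtype. This may result in
	 * performance degradation for certain nested-paging guests.
	 * Prevent this conversion by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 */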
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(0xc0011021, value);
		}
	}
}

static void init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s reports MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x15: init_amd_bd(c); break;
	}

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init	= early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);
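
/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for errata that have
 * an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */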

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			       osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

/*
 * Program a hardware-breakpoint address mask via the F16h+ breakpoint
 * extensions; the DR1-DR3 mask MSRs are laid out contiguously above
 * the DR0 one.
 */
void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!cpu_has_bpext)
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}