1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/types.h>
35
36#include <linux/stop_machine.h>
37#include <linux/kvm_para.h>
38#include <linux/uaccess.h>
39#include <linux/export.h>
40#include <linux/mutex.h>
41#include <linux/init.h>
42#include <linux/sort.h>
43#include <linux/cpu.h>
44#include <linux/pci.h>
45#include <linux/smp.h>
46#include <linux/syscore_ops.h>
47#include <linux/rcupdate.h>
48
49#include <asm/cpufeature.h>
50#include <asm/e820/api.h>
51#include <asm/mtrr.h>
52#include <asm/msr.h>
53#include <asm/memtype.h>
54
55#include "mtrr.h"
56
57
/*
 * Offset added to MTRR register numbers returned by arch_phys_wc_add() so
 * that a valid cookie handed to drivers is never 0 (and never collides
 * with a raw MTRR index).
 */
#define MTRR_TO_PHYS_WC_OFFSET 1000

/* Number of variable-range MTRRs supported by this CPU. */
u32 num_var_ranges;
/* Set in mtrr_bp_init() once a working MTRR driver has been selected. */
static bool __mtrr_enabled;
/* True if an MTRR implementation was found and successfully initialized. */
static bool mtrr_enabled(void)
{
	return __mtrr_enabled;
}
67
/* Per-register reference counts for the variable-range MTRRs. */
unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
/* Serializes all MTRR add/delete operations. */
static DEFINE_MUTEX(mtrr_mutex);

/* Address-width-derived masks, computed in mtrr_bp_init(). */
u64 size_or_mask, size_and_mask;
/* When true, APs defer MTRR programming until mtrr_aps_init(). */
static bool mtrr_aps_delayed_init;

/* Per-vendor MTRR drivers, registered via set_mtrr_ops(). */
static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;

/* The MTRR driver in use on this system (NULL if none was found). */
const struct mtrr_ops *mtrr_if;

static void set_mtrr(unsigned int reg, unsigned long base,
		     unsigned long size, mtrr_type type);
80
81void __init set_mtrr_ops(const struct mtrr_ops *ops)
82{
83 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
84 mtrr_ops[ops->vendor] = ops;
85}
86
87
88static int have_wrcomb(void)
89{
90 struct pci_dev *dev;
91
92 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
93 if (dev != NULL) {
94
95
96
97
98
99 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
100 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE &&
101 dev->revision <= 5) {
102 pr_info("Serverworks LE rev < 6 detected. Write-combining disabled.\n");
103 pci_dev_put(dev);
104 return 0;
105 }
106
107
108
109
110 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
111 dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
112 pr_info("Intel 450NX MMC detected. Write-combining disabled.\n");
113 pci_dev_put(dev);
114 return 0;
115 }
116 pci_dev_put(dev);
117 }
118 return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
119}
120
121
122static void __init set_num_var_ranges(void)
123{
124 unsigned long config = 0, dummy;
125
126 if (use_intel())
127 rdmsr(MSR_MTRRcap, config, dummy);
128 else if (is_cpu(AMD) || is_cpu(HYGON))
129 config = 2;
130 else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
131 config = 8;
132
133 num_var_ranges = config & 0xff;
134}
135
136static void __init init_table(void)
137{
138 int i, max;
139
140 max = num_var_ranges;
141 for (i = 0; i < max; i++)
142 mtrr_usage_table[i] = 1;
143}
144
/* Argument bundle passed to mtrr_rendezvous_handler() via stop_machine*(). */
struct set_mtrr_data {
	unsigned long smp_base;		/* base of the range */
	unsigned long smp_size;		/* size of the range */
	unsigned int smp_reg;		/* MTRR register index; ~0U means "set all" */
	mtrr_type smp_type;		/* memory type to program */
};
151
152
153
154
155
156
157
158
/*
 * Rendezvous handler executed on each CPU by the stop_machine*() based
 * set_mtrr* wrappers.  Either programs a single variable-range register
 * or reloads the complete MTRR state, depending on data->smp_reg.
 */
static int mtrr_rendezvous_handler(void *info)
{
	struct set_mtrr_data *data = info;

	/*
	 * smp_reg == ~0U is the sentinel meaning "set all registers";
	 * any other value targets exactly one variable range.
	 */
	if (data->smp_reg != ~0U) {
		mtrr_if->set(data->smp_reg, data->smp_base,
			     data->smp_size, data->smp_type);
	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
		/*
		 * Full reload only during delayed AP bringup or on a CPU
		 * that is not yet marked online -- NOTE(review): online
		 * CPUs presumably already hold the correct state in the
		 * single-register case; confirm against set_mtrr callers.
		 */
		mtrr_if->set_all();
	}
	return 0;
}
184
185static inline int types_compatible(mtrr_type type1, mtrr_type type2)
186{
187 return type1 == MTRR_TYPE_UNCACHABLE ||
188 type2 == MTRR_TYPE_UNCACHABLE ||
189 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
190 (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
191}
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227static void
228set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
229{
230 struct set_mtrr_data data = { .smp_reg = reg,
231 .smp_base = base,
232 .smp_size = size,
233 .smp_type = type
234 };
235
236 stop_machine(mtrr_rendezvous_handler, &data, cpu_online_mask);
237}
238
239static void set_mtrr_cpuslocked(unsigned int reg, unsigned long base,
240 unsigned long size, mtrr_type type)
241{
242 struct set_mtrr_data data = { .smp_reg = reg,
243 .smp_base = base,
244 .smp_size = size,
245 .smp_type = type
246 };
247
248 stop_machine_cpuslocked(mtrr_rendezvous_handler, &data, cpu_online_mask);
249}
250
251static void set_mtrr_from_inactive_cpu(unsigned int reg, unsigned long base,
252 unsigned long size, mtrr_type type)
253{
254 struct set_mtrr_data data = { .smp_reg = reg,
255 .smp_base = base,
256 .smp_size = size,
257 .smp_type = type
258 };
259
260 stop_machine_from_inactive_cpu(mtrr_rendezvous_handler, &data,
261 cpu_callout_mask);
262}
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299int mtrr_add_page(unsigned long base, unsigned long size,
300 unsigned int type, bool increment)
301{
302 unsigned long lbase, lsize;
303 int i, replace, error;
304 mtrr_type ltype;
305
306 if (!mtrr_enabled())
307 return -ENXIO;
308
309 error = mtrr_if->validate_add_page(base, size, type);
310 if (error)
311 return error;
312
313 if (type >= MTRR_NUM_TYPES) {
314 pr_warn("type: %u invalid\n", type);
315 return -EINVAL;
316 }
317
318
319 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
320 pr_warn("your processor doesn't support write-combining\n");
321 return -ENOSYS;
322 }
323
324 if (!size) {
325 pr_warn("zero sized request\n");
326 return -EINVAL;
327 }
328
329 if ((base | (base + size - 1)) >>
330 (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
331 pr_warn("base or size exceeds the MTRR width\n");
332 return -EINVAL;
333 }
334
335 error = -EINVAL;
336 replace = -1;
337
338
339 cpus_read_lock();
340
341
342 mutex_lock(&mtrr_mutex);
343 for (i = 0; i < num_var_ranges; ++i) {
344 mtrr_if->get(i, &lbase, &lsize, <ype);
345 if (!lsize || base > lbase + lsize - 1 ||
346 base + size - 1 < lbase)
347 continue;
348
349
350
351
352 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
353 if (base <= lbase &&
354 base + size - 1 >= lbase + lsize - 1) {
355
356 if (type == ltype) {
357 replace = replace == -1 ? i : -2;
358 continue;
359 } else if (types_compatible(type, ltype))
360 continue;
361 }
362 pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n", base, size, lbase,
363 lsize);
364 goto out;
365 }
366
367 if (ltype != type) {
368 if (types_compatible(type, ltype))
369 continue;
370 pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n",
371 base, size, mtrr_attrib_to_str(ltype),
372 mtrr_attrib_to_str(type));
373 goto out;
374 }
375 if (increment)
376 ++mtrr_usage_table[i];
377 error = i;
378 goto out;
379 }
380
381 i = mtrr_if->get_free_region(base, size, replace);
382 if (i >= 0) {
383 set_mtrr_cpuslocked(i, base, size, type);
384 if (likely(replace < 0)) {
385 mtrr_usage_table[i] = 1;
386 } else {
387 mtrr_usage_table[i] = mtrr_usage_table[replace];
388 if (increment)
389 mtrr_usage_table[i]++;
390 if (unlikely(replace != i)) {
391 set_mtrr_cpuslocked(replace, 0, 0, 0);
392 mtrr_usage_table[replace] = 0;
393 }
394 }
395 } else {
396 pr_info("no more MTRRs available\n");
397 }
398 error = i;
399 out:
400 mutex_unlock(&mtrr_mutex);
401 cpus_read_unlock();
402 return error;
403}
404
405static int mtrr_check(unsigned long base, unsigned long size)
406{
407 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
408 pr_warn("size and base must be multiples of 4 kiB\n");
409 pr_debug("size: 0x%lx base: 0x%lx\n", size, base);
410 dump_stack();
411 return -1;
412 }
413 return 0;
414}
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
452 bool increment)
453{
454 if (!mtrr_enabled())
455 return -ENODEV;
456 if (mtrr_check(base, size))
457 return -EINVAL;
458 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
459 increment);
460}
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476int mtrr_del_page(int reg, unsigned long base, unsigned long size)
477{
478 int i, max;
479 mtrr_type ltype;
480 unsigned long lbase, lsize;
481 int error = -EINVAL;
482
483 if (!mtrr_enabled())
484 return -ENODEV;
485
486 max = num_var_ranges;
487
488 cpus_read_lock();
489 mutex_lock(&mtrr_mutex);
490 if (reg < 0) {
491
492 for (i = 0; i < max; ++i) {
493 mtrr_if->get(i, &lbase, &lsize, <ype);
494 if (lbase == base && lsize == size) {
495 reg = i;
496 break;
497 }
498 }
499 if (reg < 0) {
500 pr_debug("no MTRR for %lx000,%lx000 found\n",
501 base, size);
502 goto out;
503 }
504 }
505 if (reg >= max) {
506 pr_warn("register: %d too big\n", reg);
507 goto out;
508 }
509 mtrr_if->get(reg, &lbase, &lsize, <ype);
510 if (lsize < 1) {
511 pr_warn("MTRR %d not used\n", reg);
512 goto out;
513 }
514 if (mtrr_usage_table[reg] < 1) {
515 pr_warn("reg: %d has count=0\n", reg);
516 goto out;
517 }
518 if (--mtrr_usage_table[reg] < 1)
519 set_mtrr_cpuslocked(reg, 0, 0, 0);
520 error = reg;
521 out:
522 mutex_unlock(&mtrr_mutex);
523 cpus_read_unlock();
524 return error;
525}
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541int mtrr_del(int reg, unsigned long base, unsigned long size)
542{
543 if (!mtrr_enabled())
544 return -ENODEV;
545 if (mtrr_check(base, size))
546 return -EINVAL;
547 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
548}
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565int arch_phys_wc_add(unsigned long base, unsigned long size)
566{
567 int ret;
568
569 if (pat_enabled() || !mtrr_enabled())
570 return 0;
571
572 ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
573 if (ret < 0) {
574 pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
575 (void *)base, (void *)(base + size - 1));
576 return ret;
577 }
578 return ret + MTRR_TO_PHYS_WC_OFFSET;
579}
580EXPORT_SYMBOL(arch_phys_wc_add);
581
582
583
584
585
586
587
588
589
590
591void arch_phys_wc_del(int handle)
592{
593 if (handle >= 1) {
594 WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
595 mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
596 }
597}
598EXPORT_SYMBOL(arch_phys_wc_del);
599
600
601
602
603
604
605
606
607
608
609
610
611int arch_phys_wc_index(int handle)
612{
613 if (handle < MTRR_TO_PHYS_WC_OFFSET)
614 return -1;
615 else
616 return handle - MTRR_TO_PHYS_WC_OFFSET;
617}
618EXPORT_SYMBOL_GPL(arch_phys_wc_index);
619
620
621
622
623
624
/*
 * Register the legacy vendor-specific MTRR implementations.  These only
 * exist on 32-bit kernels; on CONFIG_X86_64 this compiles to a no-op.
 */
static void __init init_ifs(void)
{
#ifndef CONFIG_X86_64
	amd_init_mtrr();
	cyrix_init_mtrr();
	centaur_init_mtrr();
#endif
}
633
634
635
636
/* Snapshot of one variable-range MTRR, captured for suspend/resume. */
struct mtrr_value {
	mtrr_type ltype;	/* memory type */
	unsigned long lbase;	/* base address */
	unsigned long lsize;	/* size; 0 means "register unused" */
};

/* Saved state written by mtrr_save() and replayed by mtrr_restore(). */
static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
644
645static int mtrr_save(void)
646{
647 int i;
648
649 for (i = 0; i < num_var_ranges; i++) {
650 mtrr_if->get(i, &mtrr_value[i].lbase,
651 &mtrr_value[i].lsize,
652 &mtrr_value[i].ltype);
653 }
654 return 0;
655}
656
657static void mtrr_restore(void)
658{
659 int i;
660
661 for (i = 0; i < num_var_ranges; i++) {
662 if (mtrr_value[i].lsize) {
663 set_mtrr(i, mtrr_value[i].lbase,
664 mtrr_value[i].lsize,
665 mtrr_value[i].ltype);
666 }
667 }
668}
669
670
671
/* Save/restore variable MTRRs across system suspend (non-generic CPUs). */
static struct syscore_ops mtrr_syscore_ops = {
	.suspend	= mtrr_save,
	.resume		= mtrr_restore,
};

/* Set when mtrr_cleanup() rewrote the firmware-provided MTRR layout. */
int __initdata changed_by_mtrr_cleanup;

/* All-ones mask above page-shifted physical-address bit (n). */
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
680
681
682
683
684
685
686
/*
 * Boot-processor MTRR initialization.  Selects the MTRR driver (generic
 * when X86_FEATURE_MTRR is present, otherwise a legacy vendor driver),
 * computes the size_or_mask/size_and_mask address-width masks, reads the
 * firmware MTRR state, and runs mtrr_cleanup() on the generic path.
 * If no usable MTRR setup is found, PAT is disabled as well.
 */
void __init mtrr_bp_init(void)
{
	u32 phys_addr;

	init_ifs();

	/* Default assumption: 32 physical address bits. */
	phys_addr = 32;

	if (boot_cpu_has(X86_FEATURE_MTRR)) {
		mtrr_if = &generic_mtrr_ops;
		/* Start from the pre-CPUID-leaf-0x80000008 default of 36 bits. */
		size_or_mask = SIZE_OR_MASK_BITS(36);
		size_and_mask = 0x00f00000;
		phys_addr = 36;

		if (cpuid_eax(0x80000000) >= 0x80000008) {
			/* CPU reports its true physical address width. */
			phys_addr = cpuid_eax(0x80000008) & 0xff;
			/*
			 * NOTE(review): special-case for Intel family 0xF
			 * model 3 steppings 3/4 forcing 36 bits -- looks
			 * like an erratum workaround; confirm against
			 * Intel documentation.
			 */
			if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			    boot_cpu_data.x86 == 0xF &&
			    boot_cpu_data.x86_model == 0x3 &&
			    (boot_cpu_data.x86_stepping == 0x3 ||
			     boot_cpu_data.x86_stepping == 0x4))
				phys_addr = 36;

			size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
			   boot_cpu_data.x86 == 6) {
			/* VIA C* family: treat as a plain 32-bit machine. */
			size_or_mask = SIZE_OR_MASK_BITS(32);
			size_and_mask = 0;
			phys_addr = 32;
		}
	} else {
		/* No architectural MTRRs: try the legacy vendor drivers. */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_AMD:
			if (cpu_feature_enabled(X86_FEATURE_K6_MTRR)) {
				/* Pre-Athlon (K6) AMD CPU MTRRs */
				mtrr_if = mtrr_ops[X86_VENDOR_AMD];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CENTAUR:
			if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		case X86_VENDOR_CYRIX:
			if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR)) {
				mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
				size_or_mask = SIZE_OR_MASK_BITS(32);
				size_and_mask = 0;
			}
			break;
		default:
			break;
		}
	}

	if (mtrr_if) {
		__mtrr_enabled = true;
		set_num_var_ranges();
		init_table();
		if (use_intel()) {
			/* BIOS may have left MTRRs disabled entirely. */
			__mtrr_enabled = get_mtrr_state();

			if (mtrr_enabled())
				mtrr_bp_pat_init();

			if (mtrr_cleanup(phys_addr)) {
				changed_by_mtrr_cleanup = 1;
				mtrr_if->set_all();
			}
		}
	}

	if (!mtrr_enabled()) {
		pr_info("Disabled\n");

		/*
		 * PAT initialization relies on MTRR's rendezvous handler;
		 * without MTRRs it must be skipped as well.
		 */
		pat_disable("MTRRs disabled, skipping PAT initialization too.");
	}
}
786
/*
 * Program this AP's MTRRs to match the boot CPU, called during AP
 * bringup before the CPU is marked online (hence the
 * stop_machine_from_inactive_cpu() based helper).
 */
void mtrr_ap_init(void)
{
	if (!mtrr_enabled())
		return;

	/*
	 * With the generic (Intel) driver and delayed init requested,
	 * MTRR programming is deferred and done in bulk later via
	 * mtrr_aps_init() instead of per-CPU here.
	 */
	if (!use_intel() || mtrr_aps_delayed_init)
		return;

	/* ~0U: reload the complete MTRR state, not a single register. */
	set_mtrr_from_inactive_cpu(~0U, 0, 0, 0);
}
810
811
812
813
814
815void mtrr_save_state(void)
816{
817 int first_cpu;
818
819 if (!mtrr_enabled())
820 return;
821
822 first_cpu = cpumask_first(cpu_online_mask);
823 smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
824}
825
826void set_mtrr_aps_delayed_init(void)
827{
828 if (!mtrr_enabled())
829 return;
830 if (!use_intel())
831 return;
832
833 mtrr_aps_delayed_init = true;
834}
835
836
837
838
839void mtrr_aps_init(void)
840{
841 if (!use_intel() || !mtrr_enabled())
842 return;
843
844
845
846
847
848
849 if (!mtrr_aps_delayed_init)
850 return;
851
852 set_mtrr(~0U, 0, 0, 0);
853 mtrr_aps_delayed_init = false;
854}
855
856void mtrr_bp_restore(void)
857{
858 if (!use_intel() || !mtrr_enabled())
859 return;
860
861 mtrr_if->set_all();
862}
863
864static int __init mtrr_init_finialize(void)
865{
866 if (!mtrr_enabled())
867 return 0;
868
869 if (use_intel()) {
870 if (!changed_by_mtrr_cleanup)
871 mtrr_state_warn();
872 return 0;
873 }
874
875
876
877
878
879
880
881
882
883 register_syscore_ops(&mtrr_syscore_ops);
884
885 return 0;
886}
887subsys_initcall(mtrr_init_finialize);
888