// SPDX-License-Identifier: GPL-2.0-only
/*
 * Page Attribute Table (PAT) support: handle memory caching attributes
 * in page tables.
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 *
 * PAT supports the WB (write-back), WC (write-combining), UC-
 * (uncached-minus, i.e. uncached unless overridden by an MTRR), UC
 * (strongly uncached), WP (write-protected) and WT (write-through)
 * memory types. This file tracks the memory type of RAM (per page flags)
 * and non-RAM (interval tree) physical ranges so that conflicting cache
 * attributes are never established for aliased mappings.
 */
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/debugfs.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/fcntl.h>
#include <asm/e820/api.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/io.h>

#include "memtype.h"
#include "../mm_internal.h"

#undef pr_fmt
#define pr_fmt(fmt) "" fmt

static bool __read_mostly pat_bp_initialized;
static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
static bool __read_mostly pat_bp_enabled;
static bool __read_mostly pat_cm_initialized;

/*
 * PAT support is enabled by default, but can be disabled for
 * various user-requested or hardware-forced reasons:
 */
void pat_disable(const char *msg_reason)
{
	if (pat_disabled)
		return;

	if (pat_bp_initialized) {
		WARN_ONCE(1, "x86/PAT: PAT cannot be disabled after initialization\n");
		return;
	}

	pat_disabled = true;
	pr_info("x86/PAT: %s\n", msg_reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled via boot option.");
	return 0;
}
early_param("nopat", nopat);

bool pat_enabled(void)
{
	return pat_bp_enabled;
}
EXPORT_SYMBOL_GPL(pat_enabled);

int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);

#ifdef CONFIG_X86_PAT

/*
 * X86 PAT uses page flags arch_1 and uncached together to keep track of
 * memory type of pages that have backing page struct.
 *
 * X86 PAT supports 4 different memory types:
 *  - _PAGE_CACHE_MODE_WB
 *  - _PAGE_CACHE_MODE_WC
 *  - _PAGE_CACHE_MODE_UC_MINUS
 *  - _PAGE_CACHE_MODE_WT
 *
 * _PAGE_CACHE_MODE_WB is the default type.
 */
#define _PGMT_WB		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WT		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_WB)
		return _PAGE_CACHE_MODE_WB;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_MODE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_MODE_UC_MINUS;
	else
		return _PAGE_CACHE_MODE_WT;
}

static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_MODE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_MODE_WT:
		memtype_flags = _PGMT_WT;
		break;
	case _PAGE_CACHE_MODE_WB:
	default:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline enum page_cache_mode get_page_memtype(struct page *pg)
{
	return -1;
}
static inline void set_page_memtype(struct page *pg,
				    enum page_cache_mode memtype)
{
}
#endif

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define CM(c) (_PAGE_CACHE_MODE_ ## c)

static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
{
	enum page_cache_mode cache;
	char *cache_mode;

	/*
	 * Each name below must be exactly 4 characters wide (including the
	 * padding), since memcpy() copies 4 bytes into the message buffer.
	 */
	switch (pat_val) {
	case PAT_UC:       cache = CM(UC);       cache_mode = "UC  "; break;
	case PAT_WC:       cache = CM(WC);       cache_mode = "WC  "; break;
	case PAT_WT:       cache = CM(WT);       cache_mode = "WT  "; break;
	case PAT_WP:       cache = CM(WP);       cache_mode = "WP  "; break;
	case PAT_WB:       cache = CM(WB);       cache_mode = "WB  "; break;
	case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
	default:           cache = CM(WB);       cache_mode = "WB  "; break;
	}

	memcpy(msg, cache_mode, 4);

	return cache;
}

#undef CM

/*
 * Update the cache mode to pgprot translation tables according to PAT
 * configuration.
 * Using lower indices is preferred, so we start with highest index.
 */
static void __init_cache_modes(u64 pat)
{
	enum page_cache_mode cache;
	char pat_msg[33];
	int i;

	WARN_ON_ONCE(pat_cm_initialized);

	pat_msg[32] = 0;
	for (i = 7; i >= 0; i--) {
		cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
					   pat_msg + 4 * i);
		update_cache_mode_entry(i, cache);
	}
	pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);

	pat_cm_initialized = true;
}

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
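
/*
 * Worked example (added for illustration; not upstream text): PAT(x, y)
 * places the 3-bit SDM encoding of type y (PAT_WB = 6, PAT_WC = 1, ...)
 * into byte x of the 64-bit IA32_PAT MSR image, e.g. PAT(1, WC) ==
 * (u64)1 << 8 == 0x100. The BIOS-default table built in init_cache_modes()
 * below (WB, WT, UC-, UC repeated twice) thus encodes as 0x0007040600070406.
 */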

static void pat_bp_init(u64 pat)
{
	u64 tmp_pat;

	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		pat_disable("PAT not supported by the CPU.");
		return;
	}

	rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
	if (!tmp_pat) {
		pat_disable("PAT support disabled by the firmware.");
		return;
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
	pat_bp_enabled = true;

	__init_cache_modes(pat);
}

static void pat_ap_init(u64 pat)
{
	if (!boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * If this happens we are on a secondary CPU, but switched to
		 * PAT on the boot CPU. We have no way to undo PAT.
		 */
		panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
	}

	wrmsrl(MSR_IA32_CR_PAT, pat);
}

void init_cache_modes(void)
{
	u64 pat = 0;

	if (pat_cm_initialized)
		return;

	if (boot_cpu_has(X86_FEATURE_PAT)) {
		/*
		 * CPU supports PAT. Set PAT table to be consistent with
		 * PAT MSR. This case supports "nopat" boot option, and
		 * virtual machine environments which support PAT without
		 * MTRRs. In specific, Xen has unique setup to PAT MSR.
		 *
		 * If PAT MSR returns 0, it is considered invalid and emulates
		 * as No PAT.
		 */
		rdmsrl(MSR_IA32_CR_PAT, pat);
	}

	if (!pat) {
		/*
		 * No PAT. Emulate the PAT table that corresponds to the two
		 * cache bits, PWT (Write Through) and PCD (Cache Disable).
		 * This setup is also the same as the BIOS default setup.
		 *
		 * PTE encoding:
		 *
		 *       PCD
		 *       |PWT  PAT
		 *       ||    slot
		 *       00    0    WB : _PAGE_CACHE_MODE_WB
		 *       01    1    WT : _PAGE_CACHE_MODE_WT
		 *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *       11    3    UC : _PAGE_CACHE_MODE_UC
		 *
		 * NOTE: When WC or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
	}

	__init_cache_modes(pat);
}

/**
 * pat_init - Initialize the PAT MSR and PAT table on the current CPU
 *
 * This function initializes PAT MSR and PAT table with an OS-defined value
 * to enable additional cache attributes, WC, WT and WP.
 *
 * It must be called on all CPUs, since the PAT MSR has to be programmed
 * consistently across the whole system for the cache mode tables to be
 * valid.
 */
void pat_init(void)
{
	u64 pat;
	struct cpuinfo_x86 *c = &boot_cpu_data;

#ifndef CONFIG_X86_PAT
	pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n");
#endif

	if (pat_disabled)
		return;

	if ((c->x86_vendor == X86_VENDOR_INTEL) &&
	    (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
	     ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
		/*
		 * PAT support with the lower four entries. Intel Pentium 2,
		 * 3, M, and 4 are affected by PAT errata, which makes the
		 * upper four entries unusable. To be on the safe side, we
		 * don't use those.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 * PAT bit unset is all the same.
		 *
		 * NOTE: When WT or WP is used, it is redirected to UC- per
		 * the default setup in __cachemode2pte_tbl[].
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
	} else {
		/*
		 * Full PAT support.  We put WT in slot 7 to improve
		 * robustness in the presence of errata that might cause
		 * the high PAT bit to be ignored.  This way, a buggy slot 7
		 * access will hit slot 3, and slot 3 is UC, so at worst
		 * we lose performance without causing a correctness issue.
		 * Pentium 4 erratum N46 is an example for such an erratum,
		 * although we try not to use PAT at all on affected CPUs.
		 *
		 *  PTE encoding:
		 *      PAT
		 *      |PCD
		 *      ||PWT  PAT
		 *      |||    slot
		 *      000    0    WB : _PAGE_CACHE_MODE_WB
		 *      001    1    WC : _PAGE_CACHE_MODE_WC
		 *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
		 *      011    3    UC : _PAGE_CACHE_MODE_UC
		 *      100    4    WB : Reserved
		 *      101    5    WP : _PAGE_CACHE_MODE_WP
		 *      110    6    UC-: Reserved
		 *      111    7    WT : _PAGE_CACHE_MODE_WT
		 *
		 * The reserved slots are unused, but mapped to their
		 * corresponding types in the presence of PAT errata.
		 */
		pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
		      PAT(4, WB) | PAT(5, WP) | PAT(6, UC_MINUS) | PAT(7, WT);
	}

	if (!pat_bp_initialized) {
		pat_bp_init(pat);
		pat_bp_initialized = true;
	} else {
		pat_ap_init(pat);
	}
}
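
/*
 * Illustration (added; not upstream text): with full PAT support, the table
 * above packs into IA32_PAT as 0x0407050600070106, i.e. slot 0 = WB (6),
 * slot 1 = WC (1), slot 2 = UC- (7), slot 3 = UC (0), slot 4 = WB (6),
 * slot 5 = WP (5), slot 6 = UC- (7), slot 7 = WT (4).
 */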

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (Type in pat and mtrr will not have same value)
 * The intersection is based on "Effective Memory Type" tables in IA-32
 * SDM vol 3a
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end,
				     enum page_cache_mode req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_MODE_WB) {
		u8 mtrr_type, uniform;

		mtrr_type = mtrr_type_lookup(start, end, &uniform);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_MODE_UC_MINUS;

		return _PAGE_CACHE_MODE_WB;
	}

	return req_type;
}
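
/*
 * Example (added for illustration): a WB request over a range that an MTRR
 * marks UC comes back as UC- here, so a PAT entry can never upgrade the
 * effective type past the MTRR. Requests other than WB are returned
 * unchanged, since the effective-type tables do not weaken them.
 */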

struct pagerange_state {
	unsigned long		cur_pfn;
	int			ram;
	int			not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of
	 * /dev/mem to map portions of legacy ISA region, even when
	 * some of those portions are listed(or not even listed) with
	 * different e820 types(RAM/reserved/..)
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
					    &state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}
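
/*
 * Note (added for clarity): pat_pagerange_is_ram() returns 1 if the whole
 * range (above the ISA hole) is RAM, 0 if none of it is, and -1 if it mixes
 * RAM and non-RAM pages, which callers treat as an invalid request.
 */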

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * The page flags are limited to four types, WB (default), WC, WT and UC-.
 * WP request fails with -EINVAL, and UC gets redirected to UC-.  Setting
 * a new memory type is only allowed for a page mapped with the default WB
 * type.
 *
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts.
 * - In case of no conflicts, set the new memtype for pages in the range.
 */
static int reserve_ram_pages_type(u64 start, u64 end,
				  enum page_cache_mode req_type,
				  enum page_cache_mode *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_MODE_WP) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_UC_MINUS;
		return -EINVAL;
	}

	if (req_type == _PAGE_CACHE_MODE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_MODE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		enum page_cache_mode type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != _PAGE_CACHE_MODE_WB) {
			pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
				start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, _PAGE_CACHE_MODE_WB);
	}
	return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 *
	 * Decoy addresses are not present for 32-bit builds, see
	 * set_mce_nospec().
	 */
	if (IS_ENABLED(CONFIG_X86_64))
		return address & __PHYSICAL_MASK;
	return address;
}

/*
 * req_type typically has one of the:
 * - _PAGE_CACHE_MODE_WB
 * - _PAGE_CACHE_MODE_WC
 * - _PAGE_CACHE_MODE_UC_MINUS
 * - _PAGE_CACHE_MODE_UC
 * - _PAGE_CACHE_MODE_WT
 *
 * If new_type is NULL, function will return an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, function will return
 * available type in new_type in case of no error. In case of any error
 * it will return a negative return value.
 */
int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
		    enum page_cache_mode *new_type)
{
	struct memtype *entry_new;
	enum page_cache_mode actual_type;
	int is_range_ram;
	int err = 0;

	start = sanitize_phys(start);

	/*
	 * The end address passed into this function is exclusive, but
	 * sanitize_phys() expects an inclusive address.
	 */
	end = sanitize_phys(end - 1) + 1;
	if (start >= end) {
		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
			start, end - 1, cattr_name(req_type));
		return -EINVAL;
	}

	if (!pat_enabled()) {
		/* This is identical to page table setting without PAT */
		if (new_type)
			*new_type = req_type;
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_MODE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {

		err = reserve_ram_pages_type(start, end, req_type, new_type);

		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!entry_new)
		return -ENOMEM;

	entry_new->start = start;
	entry_new->end	 = end;
	entry_new->type	 = actual_type;

	spin_lock(&memtype_lock);

	err = memtype_check_insert(entry_new, new_type);
	if (err) {
		pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
			start, end - 1,
			cattr_name(entry_new->type), cattr_name(req_type));
		kfree(entry_new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(entry_new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}
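
/*
 * Hypothetical usage sketch (added; not part of the original file): a caller
 * mapping a device aperture as write-combining would pair the two interfaces
 * roughly like this, accepting a possibly downgraded type via @new_type:
 *
 *	enum page_cache_mode pcm;
 *
 *	if (!memtype_reserve(phys, phys + len, _PAGE_CACHE_MODE_WC, &pcm)) {
 *		// map using cachemode2protval(pcm), then on teardown:
 *		memtype_free(phys, phys + len);
 *	}
 */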

int memtype_free(u64 start, u64 end)
{
	int is_range_ram;
	struct memtype *entry_old;

	if (!pat_enabled())
		return 0;

	start = sanitize_phys(start);
	end = sanitize_phys(end);

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return free_ram_pages_type(start, end);
	if (is_range_ram < 0)
		return -EINVAL;

	spin_lock(&memtype_lock);
	entry_old = memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (IS_ERR(entry_old)) {
		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry_old);

	dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}

/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_WT.
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
{
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		return get_page_memtype(page);
	}

	spin_lock(&memtype_lock);

	entry = memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);

	return rettype;
}

/**
 * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
 * of @pfn cannot be overridden by UC MTRR memory type.
 *
 * Only to be called when PAT is enabled.
 *
 * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC.
 * Returns false in other cases.
 */
bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
{
	enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

	return cm == _PAGE_CACHE_MODE_UC ||
	       cm == _PAGE_CACHE_MODE_UC_MINUS ||
	       cm == _PAGE_CACHE_MODE_WC;
}
EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);

/**
 * memtype_reserve_io - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * type will be stored here in case of no error.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int memtype_reserve_io(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
{
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = memtype_reserve(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (memtype_kernel_map_sync(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	memtype_free(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * memtype_free_io - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void memtype_free_io(resource_size_t start, resource_size_t end)
{
	memtype_free(start, end);
}

#ifdef CONFIG_X86_PAT
int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	enum page_cache_mode type = _PAGE_CACHE_MODE_WC;

	return memtype_reserve_io(start, start + size, &type);
}
EXPORT_SYMBOL(arch_io_reserve_memtype_wc);

void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	memtype_free_io(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
#endif
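
/*
 * Hypothetical usage sketch (added; not part of the original file): a
 * framebuffer or GPU driver typically brackets its VRAM aperture with these
 * helpers so later ioremap_wc()/pgprot_writecombine() users agree on the
 * type (error handling omitted):
 *
 *	arch_io_reserve_memtype_wc(aperture_base, aperture_size);
 *	...
 *	arch_io_free_memtype_wc(aperture_base, aperture_size);
 */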

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size))
		vma_prot = pgprot_decrypted(vma_prot);

	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled())
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		pcm = _PAGE_CACHE_MODE_UC_MINUS;

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     cachemode2protval(pcm));
	return 1;
}
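
/*
 * Descriptive note (added): this hook backs the /dev/mem mmap path in
 * drivers/char/mem.c; opening /dev/mem with O_DSYNC requests an
 * uncached-minus mapping here instead of the default write-back one.
 */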

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int memtype_kernel_map_sync(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * Some areas in the middle of the kernel identity range
	 * are not mapped, for example the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base : size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(pcm),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserved non RAM regions only and after successful memtype_reserve,
 * this func also keeps identity mapping (if any) in sync with this new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot);
	enum page_cache_mode pcm = want_pcm;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * For RAM, the memory type is tracked per page, so the request cannot
	 * establish a new type here. Verify that the existing per-page type
	 * matches, and fix up the vma protection to the tracked type when it
	 * does not.
	 */
	if (is_ram) {
		if (!pat_enabled())
			return 0;

		pcm = lookup_memtype(paddr);
		if (want_pcm != pcm) {
			pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					     (~_PAGE_CACHE_MASK)) |
					     cachemode2protval(pcm));
		}
		return 0;
	}

	ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm);
	if (ret)
		return ret;

	if (pcm != want_pcm) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
			memtype_free(paddr, paddr + size);
			pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_pcm),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(pcm));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				      cachemode2protval(pcm));
	}

	if (memtype_kernel_map_sync(paddr, size, pcm) < 0) {
		memtype_free(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		memtype_free(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from pte and reserve the entire vma range with single reserve_pfn_range
 * call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, or no vma is provided,
 * reserve the entire pfn + size range with single reserve_pfn_range
 * call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (!vma || (addr == vma->vm_start
				&& size == (vma->vm_end - vma->vm_start))) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (ret == 0 && vma)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled())
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));

	return 0;
}
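
/*
 * Descriptive note (added): this is the arch hook called from
 * remap_pfn_range(); full-vma remaps get a fresh reservation and VM_PAT,
 * while partial remaps merely verify that every page in the chunk already
 * carries one uniform memory type.
 */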

void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
	enum page_cache_mode pcm;

	if (!pat_enabled())
		return;

	/* Set prot based on lookup */
	pcm = lookup_memtype(pfn_t_to_phys(pfn));
	*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (vma && !(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	if (vma)
		vma->vm_flags &= ~VM_PAT;
}

/*
 * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
 * with the old vma after its pfnmap page table has been removed.  The new
 * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
 */
void untrack_pfn_moved(struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

pgprot_t pgprot_writethrough(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
}
EXPORT_SYMBOL_GPL(pgprot_writethrough);
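
/*
 * Hypothetical usage sketch (added; not part of the original file): a
 * driver's mmap handler commonly applies these helpers to the vma
 * protection before inserting pages, e.g.
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */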

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

/*
 * We are allocating a temporary printout-entry to be passed
 * between seq_start()/next() and seq_show():
 */
static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *entry_print;
	int ret;

	entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!entry_print)
		return NULL;

	spin_lock(&memtype_lock);
	ret = memtype_copy_nth_element(entry_print, pos);
	spin_unlock(&memtype_lock);

	/* Free it on error: */
	if (ret) {
		kfree(entry_print);
		return NULL;
	}

	return entry_print;
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_puts(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	kfree(v);
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
	kfree(v);
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *entry_print = (struct memtype *)v;

	seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n",
			entry_print->start,
			entry_print->end,
			cattr_name(entry_print->type));

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled()) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}
late_initcall(pat_memtype_list_init);
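
/*
 * Illustration only (added; exact output depends on the system): with PAT
 * enabled, the tracked ranges can be dumped from userspace, producing
 * output along the lines of
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	PAT: [mem 0x00000000fd000000-0x00000000fe000000] write-combining
 *	PAT: [mem 0x00000000fed00000-0x00000000fed01000] uncached-minus
 */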

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */