// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
#include <asm/set_memory.h>

#include "../mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	unsigned long	numpages;
	unsigned long	curpage;
	unsigned long	pfn;
	unsigned int	flags;
	unsigned int	force_split		: 1,
			force_static_prot	: 1,
			force_flush_all		: 1;
	struct page	**pages;
};

enum cpa_warn {
	CPA_CONFLICT,
	CPA_PROTECT,
	CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
 * entries change the page attribute in parallel to some other cpu splitting a
 * large page entry along with changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
	cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
	cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
	cpa_4k_install++;
}

static inline void cpa_inc_lp_sameprot(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_sameprot++;
	else
		cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
	if (level == PG_LEVEL_1G)
		cpa_1g_preserved++;
	else
		cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
	seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
	seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
	seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
	seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
	seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
	seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
	seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
	return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
	return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
	.open		= cpastats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init cpa_stats_init(void)
{
	debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
			    &cpastats_fops);
	return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	/* Do not reference physical address outside the kernel. */
	return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/*
	 * Kernel text has an alias mapping at a high address, known
	 * here as "highmap".
	 */
	return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
	/* There is no highmap on 32-bit */
	return false;
}

#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
	return (long)(addr << 1) >> 1;
#else
	return addr;
#endif
}

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[idx];

		if (unlikely(PageHighMem(page)))
			return 0;

		return (unsigned long)page_address(page);
	}

	if (cpa->flags & CPA_ARRAY)
		return cpa->vaddr[idx];

	return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
	void *vend = vaddr + size;

	if (p >= vend)
		return;

	for (; p < vend; p += clflush_size)
		clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	mb();
	clflush_cache_range_opt(vaddr, size);
	mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

void arch_invalidate_pmem(void *addr, size_t size)
{
	clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
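
/*
 * Example (illustrative, not part of the original source): a driver that
 * fills a buffer which is later read by a non-cache-coherent agent can push
 * the dirty cache lines out with clflush_cache_range() before handing the
 * buffer over.  "buf", "data" and "len" below are hypothetical placeholders.
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *	// the buffer contents are now in memory, not just in the CPU caches
 */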

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86 >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

void __cpa_flush_tlb(void *data)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	for (i = 0; i < cpa->numpages; i++)
		__flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
		flush_tlb_all();
	else
		on_each_cpu(__cpa_flush_tlb, cpa, 1);

	if (!cache)
		return;

	mb();
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
	}
	mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
		     unsigned long r2_start, unsigned long r2_end)
{
	return (r1_start <= r2_end && r1_end >= r2_start) ||
		(r2_start <= r1_end && r2_end >= r1_start);
}

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN	PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END	PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
		return _PAGE_NX;
	return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
	return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases.  This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
	unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

	/*
	 * Note: __end_rodata is at page aligned and not inclusive, so
	 * subtract 1 to get the last enforced PFN in the rodata area.
	 */
	epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

	if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
		return _PAGE_RW;
	return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX.  This protects only the high kernel mapping (_text -> _etext)
 * out of which the kernel actually maps.  Do not protect the low
 * mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
	unsigned long t_end = (unsigned long)_etext - 1;
	unsigned long t_start = (unsigned long)_text;

	if (overlaps(start, end, t_start, t_end))
		return _PAGE_NX;
	return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * the large page aligned text/rodata area [_text, __end_rodata_hpage_align)
 * has to stay read-only.  Forbid _PAGE_RW for requests overlapping it so
 * that the large page mappings covering kernel text/data are preserved.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
	unsigned long t_start = (unsigned long)_text;
	unsigned int level;

	if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
		return 0;
	/*
	 * Don't enforce the !RW mapping for the kernel text mapping, if
	 * the current mapping is already using small page mapping.  No
	 * bigger mapping can be created for this, so there is nothing left
	 * to protect at the large page level.
	 */
	if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
		return _PAGE_RW;
	return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
					  unsigned long end)
{
	return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
	return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
				  unsigned long start, unsigned long end,
				  unsigned long pfn, const char *txt)
{
	static const char *lvltxt[] = {
		[CPA_CONFLICT]	= "conflict",
		[CPA_PROTECT]	= "protect",
		[CPA_DETECT]	= "detect",
	};

	if (warnlvl > cpa_warn_level || !conflicts(prot, val))
		return;

	pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
		lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
		(unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
					  unsigned long pfn, unsigned long npg,
					  unsigned long lpsize, int warnlvl)
{
	pgprotval_t forbidden, res;
	unsigned long end;

	/*
	 * There is no point in checking RW/NX conflicts when the requested
	 * mapping is setting the page !PRESENT.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		return prot;

	/* Operate on the virtual address */
	end = start + npg * PAGE_SIZE - 1;

	res = protect_kernel_text(start, end);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
	forbidden = res;

	/*
	 * Special case to preserve a large page. If the change spawns the
	 * full large page mapping then there is no point to split it
	 * up. Happens with ftrace and is going to be removed once ftrace
	 * switched to text_poke().
	 */
	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
		res = protect_kernel_text_ro(start, end);
		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
		forbidden |= res;
	}

	/* Check the PFN directly */
	res = protect_pci_bios(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
	forbidden |= res;

	res = protect_rodata(pfn, pfn + npg - 1);
	check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
	forbidden |= res;

	return __pgprot(pgprot_val(prot) & ~forbidden);
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
			     unsigned int *level)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return NULL;

	*level = PG_LEVEL_512G;
	if (p4d_large(*p4d) || !p4d_present(*p4d))
		return (pte_t *)p4d;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * non-present pte pointer which is not expected by these functions.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);

/*
 * Lookup the page table entry for a virtual address in a given mm. Return a
 * pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
			    unsigned int *level)
{
	return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address_in_mm);
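
/*
 * Example (illustrative, not part of the original source): lookup_address()
 * can be used to inspect how a kernel virtual address is currently mapped,
 * e.g. to check whether it is covered by a 4k, 2M or 1G entry.  "addr" below
 * is a placeholder for any kernel virtual address.
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		pr_info("address mapped at level %u\n", level);
 */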
631
632static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
633 unsigned int *level)
634{
635 if (cpa->pgd)
636 return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
637 address, level);
638
639 return lookup_address(address, level);
640}
641
642
643
644
645
646pmd_t *lookup_pmd_address(unsigned long address)
647{
648 pgd_t *pgd;
649 p4d_t *p4d;
650 pud_t *pud;
651
652 pgd = pgd_offset_k(address);
653 if (pgd_none(*pgd))
654 return NULL;
655
656 p4d = p4d_offset(pgd, address);
657 if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
658 return NULL;
659
660 pud = pud_offset(p4d, address);
661 if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
662 return NULL;
663
664 return pmd_offset(pud, address);
665}

/*
 * Virtual to physical translation for a kernel virtual address, done by
 * walking the page tables instead of using __pa().
 *
 * This is necessary because __pa() does not work on some kinds of memory,
 * like vmalloc() or the alloc_remap() areas on 32-bit NUMA systems.  The
 * percpu areas can end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be used at
 * initialization time, and keeping it unoptimized should increase the
 * testing coverage for the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);

	/*
	 * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
	 * before being left-shifted PAGE_SHIFT bits -- this trick is to
	 * make 32-PAE kernel work correctly.
	 */
	switch (level) {
	case PG_LEVEL_1G:
		phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PUD_PAGE_MASK;
		break;
	case PG_LEVEL_2M:
		phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
		offset = virt_addr & ~PMD_PAGE_MASK;
		break;
	default:
		phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
		offset = virt_addr & ~PAGE_MASK;
	}

	return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
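
/*
 * Example (illustrative, not part of the original source): unlike
 * __pa()/virt_to_phys(), slow_virt_to_phys() also works for addresses that
 * are not part of the direct map, such as the percpu area on some
 * configurations.  "p" is a hypothetical placeholder pointer.
 *
 *	phys_addr_t pa = slow_virt_to_phys(p);
 */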

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			p4d = p4d_offset(pgd, address);
			pud = pud_offset(p4d, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
738
static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
	/*
	 * _PAGE_GLOBAL means "global page" for present PTEs.
	 * But, it is also used to indicate _PAGE_PROTNONE
	 * for non-present PTEs.
	 *
	 * This ensures that a _PAGE_GLOBAL PTE going from
	 * present to non-present is not confused as
	 * _PAGE_PROTNONE.
	 */
	if (!(pgprot_val(prot) & _PAGE_PRESENT))
		pgprot_val(prot) &= ~_PAGE_GLOBAL;

	return prot;
}
755
756static int __should_split_large_page(pte_t *kpte, unsigned long address,
757 struct cpa_data *cpa)
758{
759 unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
760 pgprot_t old_prot, new_prot, req_prot, chk_prot;
761 pte_t new_pte, *tmp;
762 enum pg_level level;
763
764
765
766
767
768 tmp = _lookup_address_cpa(cpa, address, &level);
769 if (tmp != kpte)
770 return 1;
771
772 switch (level) {
773 case PG_LEVEL_2M:
774 old_prot = pmd_pgprot(*(pmd_t *)kpte);
775 old_pfn = pmd_pfn(*(pmd_t *)kpte);
776 cpa_inc_2m_checked();
777 break;
778 case PG_LEVEL_1G:
779 old_prot = pud_pgprot(*(pud_t *)kpte);
780 old_pfn = pud_pfn(*(pud_t *)kpte);
781 cpa_inc_1g_checked();
782 break;
783 default:
784 return -EINVAL;
785 }
786
787 psize = page_level_size(level);
788 pmask = page_level_mask(level);
789
790
791
792
793
794 lpaddr = (address + psize) & pmask;
795 numpages = (lpaddr - address) >> PAGE_SHIFT;
796 if (numpages < cpa->numpages)
797 cpa->numpages = numpages;
798
799
800
801
802
803
804
805
806 req_prot = pgprot_large_2_4k(old_prot);
807
808 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
809 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
810
811
812
813
814
815
816 req_prot = pgprot_4k_2_large(req_prot);
817 req_prot = pgprot_clear_protnone_bits(req_prot);
818 if (pgprot_val(req_prot) & _PAGE_PRESENT)
819 pgprot_val(req_prot) |= _PAGE_PSE;
820
821
822
823
824
825 pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
826 cpa->pfn = pfn;
827
828
829
830
831
832 lpaddr = address & pmask;
833 numpages = psize >> PAGE_SHIFT;
834
835
836
837
838
839
840 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
841 psize, CPA_CONFLICT);
842
843 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
844
845
846
847
848 cpa->force_static_prot = 1;
849 return 1;
850 }
851
852
853
854
855
856
857
858
859
860
861 if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
862 cpa_inc_lp_sameprot(level);
863 return 0;
864 }
865
866
867
868
869 if (address != lpaddr || cpa->numpages != numpages)
870 return 1;
871
872
873
874
875
876 new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
877 psize, CPA_DETECT);
878
879
880
881
882
883
884
885
886
887
888 if (pgprot_val(req_prot) != pgprot_val(new_prot))
889 return 1;
890
891
892 new_pte = pfn_pte(old_pfn, new_prot);
893 __set_pmd_pte(kpte, address, new_pte);
894 cpa->flags |= CPA_FLUSHTLB;
895 cpa_inc_lp_preserved(level);
896 return 0;
897}
898
899static int should_split_large_page(pte_t *kpte, unsigned long address,
900 struct cpa_data *cpa)
901{
902 int do_split;
903
904 if (cpa->force_split)
905 return 1;
906
907 spin_lock(&pgd_lock);
908 do_split = __should_split_large_page(kpte, address, cpa);
909 spin_unlock(&pgd_lock);
910
911 return do_split;
912}
913
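/*
 * Install a single entry of a split large page.  When the split was forced by
 * a static protection conflict (cpa->force_static_prot), the protections are
 * re-evaluated for each new entry so that only the affected range loses e.g.
 * RW or executable permissions instead of the whole former large page.
 */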
914static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
915 pgprot_t ref_prot, unsigned long address,
916 unsigned long size)
917{
918 unsigned int npg = PFN_DOWN(size);
919 pgprot_t prot;
920
921
922
923
924
925 if (!cpa->force_static_prot)
926 goto set;
927
928
929 prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
930
931 if (pgprot_val(prot) == pgprot_val(ref_prot))
932 goto set;
933
934
935
936
937
938
939
940
941
942 if (size == PAGE_SIZE)
943 ref_prot = prot;
944 else
945 pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
946set:
947 set_pte(pte, pfn_pte(pfn, ref_prot));
948}
949
950static int
951__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
952 struct page *base)
953{
954 unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
955 pte_t *pbase = (pte_t *)page_address(base);
956 unsigned int i, level;
957 pgprot_t ref_prot;
958 pte_t *tmp;
959
960 spin_lock(&pgd_lock);
961
962
963
964
965 tmp = _lookup_address_cpa(cpa, address, &level);
966 if (tmp != kpte) {
967 spin_unlock(&pgd_lock);
968 return 1;
969 }
970
971 paravirt_alloc_pte(&init_mm, page_to_pfn(base));
972
973 switch (level) {
974 case PG_LEVEL_2M:
975 ref_prot = pmd_pgprot(*(pmd_t *)kpte);
976
977
978
979
980 ref_prot = pgprot_large_2_4k(ref_prot);
981 ref_pfn = pmd_pfn(*(pmd_t *)kpte);
982 lpaddr = address & PMD_MASK;
983 lpinc = PAGE_SIZE;
984 break;
985
986 case PG_LEVEL_1G:
987 ref_prot = pud_pgprot(*(pud_t *)kpte);
988 ref_pfn = pud_pfn(*(pud_t *)kpte);
989 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
990 lpaddr = address & PUD_MASK;
991 lpinc = PMD_SIZE;
992
993
994
995
996
997 if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
998 pgprot_val(ref_prot) &= ~_PAGE_PSE;
999 break;
1000
1001 default:
1002 spin_unlock(&pgd_lock);
1003 return 1;
1004 }
1005
1006 ref_prot = pgprot_clear_protnone_bits(ref_prot);
1007
1008
1009
1010
1011 pfn = ref_pfn;
1012 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
1013 split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
1014
1015 if (virt_addr_valid(address)) {
1016 unsigned long pfn = PFN_DOWN(__pa(address));
1017
1018 if (pfn_range_is_mapped(pfn, pfn + 1))
1019 split_page_count(level);
1020 }
1021
1022
1023
1024
1025
1026
1027
1028
1029 __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049 flush_tlb_all();
1050 spin_unlock(&pgd_lock);
1051
1052 return 0;
1053}
1054
1055static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
1056 unsigned long address)
1057{
1058 struct page *base;
1059
1060 if (!debug_pagealloc_enabled())
1061 spin_unlock(&cpa_lock);
1062 base = alloc_pages(GFP_KERNEL, 0);
1063 if (!debug_pagealloc_enabled())
1064 spin_lock(&cpa_lock);
1065 if (!base)
1066 return -ENOMEM;
1067
1068 if (__split_large_page(cpa, kpte, address, base))
1069 __free_page(base);
1070
1071 return 0;
1072}
1073
1074static bool try_to_free_pte_page(pte_t *pte)
1075{
1076 int i;
1077
1078 for (i = 0; i < PTRS_PER_PTE; i++)
1079 if (!pte_none(pte[i]))
1080 return false;
1081
1082 free_page((unsigned long)pte);
1083 return true;
1084}
1085
1086static bool try_to_free_pmd_page(pmd_t *pmd)
1087{
1088 int i;
1089
1090 for (i = 0; i < PTRS_PER_PMD; i++)
1091 if (!pmd_none(pmd[i]))
1092 return false;
1093
1094 free_page((unsigned long)pmd);
1095 return true;
1096}
1097
1098static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
1099{
1100 pte_t *pte = pte_offset_kernel(pmd, start);
1101
1102 while (start < end) {
1103 set_pte(pte, __pte(0));
1104
1105 start += PAGE_SIZE;
1106 pte++;
1107 }
1108
1109 if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
1110 pmd_clear(pmd);
1111 return true;
1112 }
1113 return false;
1114}
1115
1116static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
1117 unsigned long start, unsigned long end)
1118{
1119 if (unmap_pte_range(pmd, start, end))
1120 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
1121 pud_clear(pud);
1122}
1123
1124static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
1125{
1126 pmd_t *pmd = pmd_offset(pud, start);
1127
1128
1129
1130
1131 if (start & (PMD_SIZE - 1)) {
1132 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1133 unsigned long pre_end = min_t(unsigned long, end, next_page);
1134
1135 __unmap_pmd_range(pud, pmd, start, pre_end);
1136
1137 start = pre_end;
1138 pmd++;
1139 }
1140
1141
1142
1143
1144 while (end - start >= PMD_SIZE) {
1145 if (pmd_large(*pmd))
1146 pmd_clear(pmd);
1147 else
1148 __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
1149
1150 start += PMD_SIZE;
1151 pmd++;
1152 }
1153
1154
1155
1156
1157 if (start < end)
1158 return __unmap_pmd_range(pud, pmd, start, end);
1159
1160
1161
1162
1163 if (!pud_none(*pud))
1164 if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
1165 pud_clear(pud);
1166}
1167
1168static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
1169{
1170 pud_t *pud = pud_offset(p4d, start);
1171
1172
1173
1174
1175 if (start & (PUD_SIZE - 1)) {
1176 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1177 unsigned long pre_end = min_t(unsigned long, end, next_page);
1178
1179 unmap_pmd_range(pud, start, pre_end);
1180
1181 start = pre_end;
1182 pud++;
1183 }
1184
1185
1186
1187
1188 while (end - start >= PUD_SIZE) {
1189
1190 if (pud_large(*pud))
1191 pud_clear(pud);
1192 else
1193 unmap_pmd_range(pud, start, start + PUD_SIZE);
1194
1195 start += PUD_SIZE;
1196 pud++;
1197 }
1198
1199
1200
1201
1202 if (start < end)
1203 unmap_pmd_range(pud, start, end);
1204
1205
1206
1207
1208
1209}
1210
1211static int alloc_pte_page(pmd_t *pmd)
1212{
1213 pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
1214 if (!pte)
1215 return -1;
1216
1217 set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
1218 return 0;
1219}
1220
1221static int alloc_pmd_page(pud_t *pud)
1222{
1223 pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
1224 if (!pmd)
1225 return -1;
1226
1227 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
1228 return 0;
1229}
1230
1231static void populate_pte(struct cpa_data *cpa,
1232 unsigned long start, unsigned long end,
1233 unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
1234{
1235 pte_t *pte;
1236
1237 pte = pte_offset_kernel(pmd, start);
1238
1239 pgprot = pgprot_clear_protnone_bits(pgprot);
1240
1241 while (num_pages-- && start < end) {
1242 set_pte(pte, pfn_pte(cpa->pfn, pgprot));
1243
1244 start += PAGE_SIZE;
1245 cpa->pfn++;
1246 pte++;
1247 }
1248}
1249
1250static long populate_pmd(struct cpa_data *cpa,
1251 unsigned long start, unsigned long end,
1252 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
1253{
1254 long cur_pages = 0;
1255 pmd_t *pmd;
1256 pgprot_t pmd_pgprot;
1257
1258
1259
1260
1261 if (start & (PMD_SIZE - 1)) {
1262 unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
1263 unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
1264
1265 pre_end = min_t(unsigned long, pre_end, next_page);
1266 cur_pages = (pre_end - start) >> PAGE_SHIFT;
1267 cur_pages = min_t(unsigned int, num_pages, cur_pages);
1268
1269
1270
1271
1272 pmd = pmd_offset(pud, start);
1273 if (pmd_none(*pmd))
1274 if (alloc_pte_page(pmd))
1275 return -1;
1276
1277 populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
1278
1279 start = pre_end;
1280 }
1281
1282
1283
1284
1285 if (num_pages == cur_pages)
1286 return cur_pages;
1287
1288 pmd_pgprot = pgprot_4k_2_large(pgprot);
1289
1290 while (end - start >= PMD_SIZE) {
1291
1292
1293
1294
1295 if (pud_none(*pud))
1296 if (alloc_pmd_page(pud))
1297 return -1;
1298
1299 pmd = pmd_offset(pud, start);
1300
1301 set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
1302 canon_pgprot(pmd_pgprot))));
1303
1304 start += PMD_SIZE;
1305 cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
1306 cur_pages += PMD_SIZE >> PAGE_SHIFT;
1307 }
1308
1309
1310
1311
1312 if (start < end) {
1313 pmd = pmd_offset(pud, start);
1314 if (pmd_none(*pmd))
1315 if (alloc_pte_page(pmd))
1316 return -1;
1317
1318 populate_pte(cpa, start, end, num_pages - cur_pages,
1319 pmd, pgprot);
1320 }
1321 return num_pages;
1322}
1323
1324static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
1325 pgprot_t pgprot)
1326{
1327 pud_t *pud;
1328 unsigned long end;
1329 long cur_pages = 0;
1330 pgprot_t pud_pgprot;
1331
1332 end = start + (cpa->numpages << PAGE_SHIFT);
1333
1334
1335
1336
1337
1338 if (start & (PUD_SIZE - 1)) {
1339 unsigned long pre_end;
1340 unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
1341
1342 pre_end = min_t(unsigned long, end, next_page);
1343 cur_pages = (pre_end - start) >> PAGE_SHIFT;
1344 cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
1345
1346 pud = pud_offset(p4d, start);
1347
1348
1349
1350
1351 if (pud_none(*pud))
1352 if (alloc_pmd_page(pud))
1353 return -1;
1354
1355 cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
1356 pud, pgprot);
1357 if (cur_pages < 0)
1358 return cur_pages;
1359
1360 start = pre_end;
1361 }
1362
1363
1364 if (cpa->numpages == cur_pages)
1365 return cur_pages;
1366
1367 pud = pud_offset(p4d, start);
1368 pud_pgprot = pgprot_4k_2_large(pgprot);
1369
1370
1371
1372
1373 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
1374 set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
1375 canon_pgprot(pud_pgprot))));
1376
1377 start += PUD_SIZE;
1378 cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
1379 cur_pages += PUD_SIZE >> PAGE_SHIFT;
1380 pud++;
1381 }
1382
1383
1384 if (start < end) {
1385 long tmp;
1386
1387 pud = pud_offset(p4d, start);
1388 if (pud_none(*pud))
1389 if (alloc_pmd_page(pud))
1390 return -1;
1391
1392 tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
1393 pud, pgprot);
1394 if (tmp < 0)
1395 return cur_pages;
1396
1397 cur_pages += tmp;
1398 }
1399 return cur_pages;
1400}
1401
1402
1403
1404
1405
1406static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1407{
1408 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
1409 pud_t *pud = NULL;
1410 p4d_t *p4d;
1411 pgd_t *pgd_entry;
1412 long ret;
1413
1414 pgd_entry = cpa->pgd + pgd_index(addr);
1415
1416 if (pgd_none(*pgd_entry)) {
1417 p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
1418 if (!p4d)
1419 return -1;
1420
1421 set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
1422 }
1423
1424
1425
1426
1427 p4d = p4d_offset(pgd_entry, addr);
1428 if (p4d_none(*p4d)) {
1429 pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
1430 if (!pud)
1431 return -1;
1432
1433 set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
1434 }
1435
1436 pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
1437 pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
1438
1439 ret = populate_pud(cpa, addr, p4d, pgprot);
1440 if (ret < 0) {
1441
1442
1443
1444
1445
1446 unmap_pud_range(p4d, addr,
1447 addr + (cpa->numpages << PAGE_SHIFT));
1448 return ret;
1449 }
1450
1451 cpa->numpages = ret;
1452 return 0;
1453}
1454
1455static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
1456 int primary)
1457{
1458 if (cpa->pgd) {
1459
1460
1461
1462
1463
1464 return populate_pgd(cpa, vaddr);
1465 }
1466
1467
1468
1469
1470 if (!primary) {
1471 cpa->numpages = 1;
1472 return 0;
1473 }
1474
1475
1476
1477
1478
1479
1480
1481
1482 if (within(vaddr, PAGE_OFFSET,
1483 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
1484 cpa->numpages = 1;
1485 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
1486 return 0;
1487
1488 } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
1489
1490 return -EFAULT;
1491 } else {
1492 WARN(1, KERN_WARNING "CPA: called for zero pte. "
1493 "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
1494 *cpa->vaddr);
1495
1496 return -EFAULT;
1497 }
1498}
1499
1500static int __change_page_attr(struct cpa_data *cpa, int primary)
1501{
1502 unsigned long address;
1503 int do_split, err;
1504 unsigned int level;
1505 pte_t *kpte, old_pte;
1506
1507 address = __cpa_addr(cpa, cpa->curpage);
1508repeat:
1509 kpte = _lookup_address_cpa(cpa, address, &level);
1510 if (!kpte)
1511 return __cpa_process_fault(cpa, address, primary);
1512
1513 old_pte = *kpte;
1514 if (pte_none(old_pte))
1515 return __cpa_process_fault(cpa, address, primary);
1516
1517 if (level == PG_LEVEL_4K) {
1518 pte_t new_pte;
1519 pgprot_t new_prot = pte_pgprot(old_pte);
1520 unsigned long pfn = pte_pfn(old_pte);
1521
1522 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
1523 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
1524
1525 cpa_inc_4k_install();
1526
1527 new_prot = static_protections(new_prot, address, pfn, 1, 0,
1528 CPA_PROTECT);
1529
1530 new_prot = pgprot_clear_protnone_bits(new_prot);
1531
1532
1533
1534
1535
1536
1537 new_pte = pfn_pte(pfn, new_prot);
1538 cpa->pfn = pfn;
1539
1540
1541
1542 if (pte_val(old_pte) != pte_val(new_pte)) {
1543 set_pte_atomic(kpte, new_pte);
1544 cpa->flags |= CPA_FLUSHTLB;
1545 }
1546 cpa->numpages = 1;
1547 return 0;
1548 }
1549
1550
1551
1552
1553
1554 do_split = should_split_large_page(kpte, address, cpa);
1555
1556
1557
1558
1559
1560 if (do_split <= 0)
1561 return do_split;
1562
1563
1564
1565
1566 err = split_large_page(cpa, kpte, address);
1567 if (!err)
1568 goto repeat;
1569
1570 return err;
1571}
1572
1573static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
1574
1575static int cpa_process_alias(struct cpa_data *cpa)
1576{
1577 struct cpa_data alias_cpa;
1578 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
1579 unsigned long vaddr;
1580 int ret;
1581
1582 if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
1583 return 0;
1584
1585
1586
1587
1588
1589 vaddr = __cpa_addr(cpa, cpa->curpage);
1590 if (!(within(vaddr, PAGE_OFFSET,
1591 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
1592
1593 alias_cpa = *cpa;
1594 alias_cpa.vaddr = &laddr;
1595 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1596 alias_cpa.curpage = 0;
1597
1598 cpa->force_flush_all = 1;
1599
1600 ret = __change_page_attr_set_clr(&alias_cpa, 0);
1601 if (ret)
1602 return ret;
1603 }
1604
1605#ifdef CONFIG_X86_64
1606
1607
1608
1609
1610
1611 if (!within(vaddr, (unsigned long)_text, _brk_end) &&
1612 __cpa_pfn_in_highmap(cpa->pfn)) {
1613 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
1614 __START_KERNEL_map - phys_base;
1615 alias_cpa = *cpa;
1616 alias_cpa.vaddr = &temp_cpa_vaddr;
1617 alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
1618 alias_cpa.curpage = 0;
1619
1620 cpa->force_flush_all = 1;
1621
1622
1623
1624
1625 __change_page_attr_set_clr(&alias_cpa, 0);
1626 }
1627#endif
1628
1629 return 0;
1630}
1631
1632static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1633{
1634 unsigned long numpages = cpa->numpages;
1635 unsigned long rempages = numpages;
1636 int ret = 0;
1637
1638 while (rempages) {
1639
1640
1641
1642
1643 cpa->numpages = rempages;
1644
1645 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
1646 cpa->numpages = 1;
1647
1648 if (!debug_pagealloc_enabled())
1649 spin_lock(&cpa_lock);
1650 ret = __change_page_attr(cpa, checkalias);
1651 if (!debug_pagealloc_enabled())
1652 spin_unlock(&cpa_lock);
1653 if (ret)
1654 goto out;
1655
1656 if (checkalias) {
1657 ret = cpa_process_alias(cpa);
1658 if (ret)
1659 goto out;
1660 }
1661
1662
1663
1664
1665
1666
1667 BUG_ON(cpa->numpages > rempages || !cpa->numpages);
1668 rempages -= cpa->numpages;
1669 cpa->curpage += cpa->numpages;
1670 }
1671
1672out:
1673
1674 cpa->numpages = numpages;
1675 return ret;
1676}
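
/*
 * change_page_attr_set_clr() is the common entry point for the various
 * set_memory_*() and set_pages_*() helpers below.  It sets the bits in
 * mask_set and clears the bits in mask_clr on numpages pages starting at
 * *addr (or on the given array of addresses/pages), splitting large mappings
 * when required and flushing the TLB (and caches, for cache attribute
 * changes) afterwards.
 */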
1677
1678static int change_page_attr_set_clr(unsigned long *addr, int numpages,
1679 pgprot_t mask_set, pgprot_t mask_clr,
1680 int force_split, int in_flag,
1681 struct page **pages)
1682{
1683 struct cpa_data cpa;
1684 int ret, cache, checkalias;
1685
1686 memset(&cpa, 0, sizeof(cpa));
1687
1688
1689
1690
1691
1692 mask_set = canon_pgprot(mask_set);
1693
1694 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
1695 return 0;
1696
1697
1698 if (in_flag & CPA_ARRAY) {
1699 int i;
1700 for (i = 0; i < numpages; i++) {
1701 if (addr[i] & ~PAGE_MASK) {
1702 addr[i] &= PAGE_MASK;
1703 WARN_ON_ONCE(1);
1704 }
1705 }
1706 } else if (!(in_flag & CPA_PAGES_ARRAY)) {
1707
1708
1709
1710
1711 if (*addr & ~PAGE_MASK) {
1712 *addr &= PAGE_MASK;
1713
1714
1715
1716 WARN_ON_ONCE(1);
1717 }
1718 }
1719
1720
1721 kmap_flush_unused();
1722
1723 vm_unmap_aliases();
1724
1725 cpa.vaddr = addr;
1726 cpa.pages = pages;
1727 cpa.numpages = numpages;
1728 cpa.mask_set = mask_set;
1729 cpa.mask_clr = mask_clr;
1730 cpa.flags = 0;
1731 cpa.curpage = 0;
1732 cpa.force_split = force_split;
1733
1734 if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
1735 cpa.flags |= in_flag;
1736
1737
1738 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
1739
1740 ret = __change_page_attr_set_clr(&cpa, checkalias);
1741
1742
1743
1744
1745 if (!(cpa.flags & CPA_FLUSHTLB))
1746 goto out;
1747
1748
1749
1750
1751
1752 cache = !!pgprot2cachemode(mask_set);
1753
1754
1755
1756
1757 if (ret) {
1758 cpa_flush_all(cache);
1759 goto out;
1760 }
1761
1762 cpa_flush(&cpa, cache);
1763out:
1764 return ret;
1765}
1766
1767static inline int change_page_attr_set(unsigned long *addr, int numpages,
1768 pgprot_t mask, int array)
1769{
1770 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
1771 (array ? CPA_ARRAY : 0), NULL);
1772}
1773
1774static inline int change_page_attr_clear(unsigned long *addr, int numpages,
1775 pgprot_t mask, int array)
1776{
1777 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
1778 (array ? CPA_ARRAY : 0), NULL);
1779}
1780
1781static inline int cpa_set_pages_array(struct page **pages, int numpages,
1782 pgprot_t mask)
1783{
1784 return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
1785 CPA_PAGES_ARRAY, pages);
1786}
1787
1788static inline int cpa_clear_pages_array(struct page **pages, int numpages,
1789 pgprot_t mask)
1790{
1791 return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
1792 CPA_PAGES_ARRAY, pages);
1793}
1794
1795int _set_memory_uc(unsigned long addr, int numpages)
1796{
1797
1798
1799
1800
1801
1802
1803 return change_page_attr_set(&addr, numpages,
1804 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1805 0);
1806}
1807
1808int set_memory_uc(unsigned long addr, int numpages)
1809{
1810 int ret;
1811
1812
1813
1814
1815 ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1816 _PAGE_CACHE_MODE_UC_MINUS, NULL);
1817 if (ret)
1818 goto out_err;
1819
1820 ret = _set_memory_uc(addr, numpages);
1821 if (ret)
1822 goto out_free;
1823
1824 return 0;
1825
1826out_free:
1827 memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1828out_err:
1829 return ret;
1830}
1831EXPORT_SYMBOL(set_memory_uc);
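
/*
 * Example (illustrative, not part of the original source): switching a
 * normal RAM buffer to uncached and back.  The hypothetical "buf" must be a
 * direct-mapped kernel address, e.g. from __get_free_pages(), and "npages"
 * its size in pages.
 *
 *	if (!set_memory_uc((unsigned long)buf, npages)) {
 *		// ... use the buffer with UC semantics ...
 *		set_memory_wb((unsigned long)buf, npages);
 *	}
 */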
1832
1833int _set_memory_wc(unsigned long addr, int numpages)
1834{
1835 int ret;
1836
1837 ret = change_page_attr_set(&addr, numpages,
1838 cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
1839 0);
1840 if (!ret) {
1841 ret = change_page_attr_set_clr(&addr, numpages,
1842 cachemode2pgprot(_PAGE_CACHE_MODE_WC),
1843 __pgprot(_PAGE_CACHE_MASK),
1844 0, 0, NULL);
1845 }
1846 return ret;
1847}
1848
1849int set_memory_wc(unsigned long addr, int numpages)
1850{
1851 int ret;
1852
1853 ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
1854 _PAGE_CACHE_MODE_WC, NULL);
1855 if (ret)
1856 return ret;
1857
1858 ret = _set_memory_wc(addr, numpages);
1859 if (ret)
1860 memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1861
1862 return ret;
1863}
1864EXPORT_SYMBOL(set_memory_wc);
1865
1866int _set_memory_wt(unsigned long addr, int numpages)
1867{
1868 return change_page_attr_set(&addr, numpages,
1869 cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
1870}
1871
1872int _set_memory_wb(unsigned long addr, int numpages)
1873{
1874
1875 return change_page_attr_clear(&addr, numpages,
1876 __pgprot(_PAGE_CACHE_MASK), 0);
1877}
1878
1879int set_memory_wb(unsigned long addr, int numpages)
1880{
1881 int ret;
1882
1883 ret = _set_memory_wb(addr, numpages);
1884 if (ret)
1885 return ret;
1886
1887 memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
1888 return 0;
1889}
1890EXPORT_SYMBOL(set_memory_wb);
1891
1892int set_memory_x(unsigned long addr, int numpages)
1893{
1894 if (!(__supported_pte_mask & _PAGE_NX))
1895 return 0;
1896
1897 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
1898}
1899
1900int set_memory_nx(unsigned long addr, int numpages)
1901{
1902 if (!(__supported_pte_mask & _PAGE_NX))
1903 return 0;
1904
1905 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
1906}
1907
1908int set_memory_ro(unsigned long addr, int numpages)
1909{
1910 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
1911}
1912
1913int set_memory_rw(unsigned long addr, int numpages)
1914{
1915 return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
1916}
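
/*
 * Example (illustrative, not part of the original source): write-protecting
 * a page-aligned kernel buffer and restoring it later.  "table" and "npages"
 * are hypothetical placeholders.
 *
 *	set_memory_ro((unsigned long)table, npages);
 *	// ... any write to "table" now faults ...
 *	set_memory_rw((unsigned long)table, npages);
 */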
1917
1918int set_memory_np(unsigned long addr, int numpages)
1919{
1920 return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
1921}
1922
1923int set_memory_4k(unsigned long addr, int numpages)
1924{
1925 return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
1926 __pgprot(0), 1, 0, NULL);
1927}
1928
1929int set_memory_nonglobal(unsigned long addr, int numpages)
1930{
1931 return change_page_attr_clear(&addr, numpages,
1932 __pgprot(_PAGE_GLOBAL), 0);
1933}
1934
1935int set_memory_global(unsigned long addr, int numpages)
1936{
1937 return change_page_attr_set(&addr, numpages,
1938 __pgprot(_PAGE_GLOBAL), 0);
1939}
1940
1941static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
1942{
1943 struct cpa_data cpa;
1944 int ret;
1945
1946
1947 if (!mem_encrypt_active())
1948 return 0;
1949
1950
1951 if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
1952 addr &= PAGE_MASK;
1953
1954 memset(&cpa, 0, sizeof(cpa));
1955 cpa.vaddr = &addr;
1956 cpa.numpages = numpages;
1957 cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
1958 cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
1959 cpa.pgd = init_mm.pgd;
1960
1961
1962 kmap_flush_unused();
1963 vm_unmap_aliases();
1964
1965
1966
1967
1968 cpa_flush(&cpa, 1);
1969
1970 ret = __change_page_attr_set_clr(&cpa, 1);
1971
1972
1973
1974
1975
1976
1977
1978
1979 cpa_flush(&cpa, 0);
1980
1981 return ret;
1982}
1983
1984int set_memory_encrypted(unsigned long addr, int numpages)
1985{
1986 return __set_memory_enc_dec(addr, numpages, true);
1987}
1988EXPORT_SYMBOL_GPL(set_memory_encrypted);
1989
1990int set_memory_decrypted(unsigned long addr, int numpages)
1991{
1992 return __set_memory_enc_dec(addr, numpages, false);
1993}
1994EXPORT_SYMBOL_GPL(set_memory_decrypted);
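
/*
 * Example (illustrative, not part of the original source): with SME/SEV
 * active, a buffer that has to be shared with a device or the hypervisor
 * must be mapped decrypted first and switched back before it is freed.
 * "vaddr" and "npages" are hypothetical placeholders.
 *
 *	ret = set_memory_decrypted((unsigned long)vaddr, npages);
 *	if (!ret) {
 *		// ... share the buffer ...
 *		set_memory_encrypted((unsigned long)vaddr, npages);
 *	}
 */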
1995
1996int set_pages_uc(struct page *page, int numpages)
1997{
1998 unsigned long addr = (unsigned long)page_address(page);
1999
2000 return set_memory_uc(addr, numpages);
2001}
2002EXPORT_SYMBOL(set_pages_uc);
2003
2004static int _set_pages_array(struct page **pages, int numpages,
2005 enum page_cache_mode new_type)
2006{
2007 unsigned long start;
2008 unsigned long end;
2009 enum page_cache_mode set_type;
2010 int i;
2011 int free_idx;
2012 int ret;
2013
2014 for (i = 0; i < numpages; i++) {
2015 if (PageHighMem(pages[i]))
2016 continue;
2017 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2018 end = start + PAGE_SIZE;
2019 if (memtype_reserve(start, end, new_type, NULL))
2020 goto err_out;
2021 }
2022
2023
2024 set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
2025 _PAGE_CACHE_MODE_UC_MINUS : new_type;
2026
2027 ret = cpa_set_pages_array(pages, numpages,
2028 cachemode2pgprot(set_type));
2029 if (!ret && new_type == _PAGE_CACHE_MODE_WC)
2030 ret = change_page_attr_set_clr(NULL, numpages,
2031 cachemode2pgprot(
2032 _PAGE_CACHE_MODE_WC),
2033 __pgprot(_PAGE_CACHE_MASK),
2034 0, CPA_PAGES_ARRAY, pages);
2035 if (ret)
2036 goto err_out;
2037 return 0;
2038err_out:
2039 free_idx = i;
2040 for (i = 0; i < free_idx; i++) {
2041 if (PageHighMem(pages[i]))
2042 continue;
2043 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2044 end = start + PAGE_SIZE;
2045 memtype_free(start, end);
2046 }
2047 return -EINVAL;
2048}
2049
2050int set_pages_array_uc(struct page **pages, int numpages)
2051{
2052 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
2053}
2054EXPORT_SYMBOL(set_pages_array_uc);
2055
2056int set_pages_array_wc(struct page **pages, int numpages)
2057{
2058 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
2059}
2060EXPORT_SYMBOL(set_pages_array_wc);
2061
2062int set_pages_array_wt(struct page **pages, int numpages)
2063{
2064 return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
2065}
2066EXPORT_SYMBOL_GPL(set_pages_array_wt);
2067
2068int set_pages_wb(struct page *page, int numpages)
2069{
2070 unsigned long addr = (unsigned long)page_address(page);
2071
2072 return set_memory_wb(addr, numpages);
2073}
2074EXPORT_SYMBOL(set_pages_wb);
2075
2076int set_pages_array_wb(struct page **pages, int numpages)
2077{
2078 int retval;
2079 unsigned long start;
2080 unsigned long end;
2081 int i;
2082
2083
2084 retval = cpa_clear_pages_array(pages, numpages,
2085 __pgprot(_PAGE_CACHE_MASK));
2086 if (retval)
2087 return retval;
2088
2089 for (i = 0; i < numpages; i++) {
2090 if (PageHighMem(pages[i]))
2091 continue;
2092 start = page_to_pfn(pages[i]) << PAGE_SHIFT;
2093 end = start + PAGE_SIZE;
2094 memtype_free(start, end);
2095 }
2096
2097 return 0;
2098}
2099EXPORT_SYMBOL(set_pages_array_wb);
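
/*
 * Example (illustrative, not part of the original source): the *_array
 * variants batch the attribute change and the TLB/cache maintenance over a
 * whole page array, which is much cheaper than converting one page at a
 * time.  "pages" and "count" are hypothetical placeholders.
 *
 *	if (!set_pages_array_wc(pages, count)) {
 *		// ... use the pages as write-combining ...
 *		set_pages_array_wb(pages, count);
 *	}
 */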
2100
2101int set_pages_ro(struct page *page, int numpages)
2102{
2103 unsigned long addr = (unsigned long)page_address(page);
2104
2105 return set_memory_ro(addr, numpages);
2106}
2107
2108int set_pages_rw(struct page *page, int numpages)
2109{
2110 unsigned long addr = (unsigned long)page_address(page);
2111
2112 return set_memory_rw(addr, numpages);
2113}
2114
2115static int __set_pages_p(struct page *page, int numpages)
2116{
2117 unsigned long tempaddr = (unsigned long) page_address(page);
2118 struct cpa_data cpa = { .vaddr = &tempaddr,
2119 .pgd = NULL,
2120 .numpages = numpages,
2121 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2122 .mask_clr = __pgprot(0),
2123 .flags = 0};
2124
2125
2126
2127
2128
2129
2130
2131 return __change_page_attr_set_clr(&cpa, 0);
2132}
2133
2134static int __set_pages_np(struct page *page, int numpages)
2135{
2136 unsigned long tempaddr = (unsigned long) page_address(page);
2137 struct cpa_data cpa = { .vaddr = &tempaddr,
2138 .pgd = NULL,
2139 .numpages = numpages,
2140 .mask_set = __pgprot(0),
2141 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2142 .flags = 0};
2143
2144
2145
2146
2147
2148
2149
2150 return __change_page_attr_set_clr(&cpa, 0);
2151}
2152
2153int set_direct_map_invalid_noflush(struct page *page)
2154{
2155 return __set_pages_np(page, 1);
2156}
2157
2158int set_direct_map_default_noflush(struct page *page)
2159{
2160 return __set_pages_p(page, 1);
2161}
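
/*
 * Note: the set_direct_map_*_noflush() helpers above intentionally skip the
 * TLB flush; callers that temporarily remove pages from the direct map are
 * expected to flush the TLB themselves once a whole batch has been changed.
 */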
2162
2163#ifdef CONFIG_DEBUG_PAGEALLOC
2164void __kernel_map_pages(struct page *page, int numpages, int enable)
2165{
2166 if (PageHighMem(page))
2167 return;
2168 if (!enable) {
2169 debug_check_no_locks_freed(page_address(page),
2170 numpages * PAGE_SIZE);
2171 }
2172
2173
2174
2175
2176
2177
2178 if (enable)
2179 __set_pages_p(page, numpages);
2180 else
2181 __set_pages_np(page, numpages);
2182
2183
2184
2185
2186
2187
2188
2189 preempt_disable();
2190 __flush_tlb_all();
2191 preempt_enable();
2192
2193 arch_flush_lazy_mmu_mode();
2194}
2195#endif
2196
2197bool kernel_page_present(struct page *page)
2198{
2199 unsigned int level;
2200 pte_t *pte;
2201
2202 if (PageHighMem(page))
2203 return false;
2204
2205 pte = lookup_address((unsigned long)page_address(page), &level);
2206 return (pte_val(*pte) & _PAGE_PRESENT);
2207}
2208
2209int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
2210 unsigned numpages, unsigned long page_flags)
2211{
2212 int retval = -EINVAL;
2213
2214 struct cpa_data cpa = {
2215 .vaddr = &address,
2216 .pfn = pfn,
2217 .pgd = pgd,
2218 .numpages = numpages,
2219 .mask_set = __pgprot(0),
2220 .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
2221 .flags = 0,
2222 };
2223
2224 WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2225
2226 if (!(__supported_pte_mask & _PAGE_NX))
2227 goto out;
2228
2229 if (!(page_flags & _PAGE_ENC))
2230 cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
2231
2232 cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
2233
2234 retval = __change_page_attr_set_clr(&cpa, 0);
2235 __flush_tlb_all();
2236
2237out:
2238 return retval;
2239}
2240
2241
2242
2243
2244
2245
2246int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
2247 unsigned long numpages)
2248{
2249 int retval;
2250
2251
2252
2253
2254
2255
2256
2257 struct cpa_data cpa = {
2258 .vaddr = &address,
2259 .pfn = 0,
2260 .pgd = pgd,
2261 .numpages = numpages,
2262 .mask_set = __pgprot(0),
2263 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
2264 .flags = 0,
2265 };
2266
2267 WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
2268
2269 retval = __change_page_attr_set_clr(&cpa, 0);
2270 __flush_tlb_all();
2271
2272 return retval;
2273}
2274
2275
2276
2277
2278
2279#ifdef CONFIG_CPA_DEBUG
2280#include "cpa-test.c"
2281#endif
2282