// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>
#include <linux/vmstat.h>
#include <linux/kernel.h>
#include <linux/cc_platform.h>

#include <asm/e820/api.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/memtype.h>
#include <asm/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>

#include "../mm_internal.h"

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgd_t           *pgd;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        unsigned long   numpages;
        unsigned long   curpage;
        unsigned long   pfn;
        unsigned int    flags;
        unsigned int    force_split             : 1,
                        force_static_prot       : 1,
                        force_flush_all         : 1;
        struct page     **pages;
};

enum cpa_warn {
        CPA_CONFLICT,
        CPA_PROTECT,
        CPA_DETECT,
};

static const int cpa_warn_level = CPA_PROTECT;

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
 * entries change the page attribute in parallel to some other cpu
 * splitting a large page entry along with changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4
#define CPA_NO_CHECK_ALIAS 8 /* Do not search for aliases */

static inline pgprot_t cachemode2pgprot(enum page_cache_mode pcm)
{
        return __pgprot(cachemode2protval(pcm));
}

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        /* Protect against CPA */
        spin_lock(&pgd_lock);
        direct_pages_count[level] += pages;
        spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
        if (direct_pages_count[level] == 0)
                return;

        direct_pages_count[level]--;
        if (system_state == SYSTEM_RUNNING) {
                if (level == PG_LEVEL_2M)
                        count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
                else if (level == PG_LEVEL_1G)
                        count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
        }
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G:    %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_CPA_STATISTICS

static unsigned long cpa_1g_checked;
static unsigned long cpa_1g_sameprot;
static unsigned long cpa_1g_preserved;
static unsigned long cpa_2m_checked;
static unsigned long cpa_2m_sameprot;
static unsigned long cpa_2m_preserved;
static unsigned long cpa_4k_install;

static inline void cpa_inc_1g_checked(void)
{
        cpa_1g_checked++;
}

static inline void cpa_inc_2m_checked(void)
{
        cpa_2m_checked++;
}

static inline void cpa_inc_4k_install(void)
{
        data_race(cpa_4k_install++);
}

static inline void cpa_inc_lp_sameprot(int level)
{
        if (level == PG_LEVEL_1G)
                cpa_1g_sameprot++;
        else
                cpa_2m_sameprot++;
}

static inline void cpa_inc_lp_preserved(int level)
{
        if (level == PG_LEVEL_1G)
                cpa_1g_preserved++;
        else
                cpa_2m_preserved++;
}

static int cpastats_show(struct seq_file *m, void *p)
{
        seq_printf(m, "1G pages checked:     %16lu\n", cpa_1g_checked);
        seq_printf(m, "1G pages sameprot:    %16lu\n", cpa_1g_sameprot);
        seq_printf(m, "1G pages preserved:   %16lu\n", cpa_1g_preserved);
        seq_printf(m, "2M pages checked:     %16lu\n", cpa_2m_checked);
        seq_printf(m, "2M pages sameprot:    %16lu\n", cpa_2m_sameprot);
        seq_printf(m, "2M pages preserved:   %16lu\n", cpa_2m_preserved);
        seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install);
        return 0;
}

static int cpastats_open(struct inode *inode, struct file *file)
{
        return single_open(file, cpastats_show, NULL);
}

static const struct file_operations cpastats_fops = {
        .open           = cpastats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init cpa_stats_init(void)
{
        debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL,
                            &cpastats_fops);
        return 0;
}
late_initcall(cpa_stats_init);
#else
static inline void cpa_inc_1g_checked(void) { }
static inline void cpa_inc_2m_checked(void) { }
static inline void cpa_inc_4k_install(void) { }
static inline void cpa_inc_lp_sameprot(int level) { }
static inline void cpa_inc_lp_preserved(int level) { }
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr <= end;
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        /* Do not reference physical address outside the kernel. */
        return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
        /*
         * Kernel text has an alias mapping at a high address, known
         * here as "highmap".
         */
        return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}

#else

static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
        /* There is no highmap on 32-bit */
        return false;
}

#endif

/*
 * See set_mce_nospec().
 *
 * Machine check recovery code needs to change cache mode of poisoned pages to
 * UC to avoid speculative access logging another error. But passing the
 * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a
 * speculative access. So we cheat and flip the top bit of the address. This
 * works fine for the code that updates the page tables. But at the end of the
 * process we need to flush the TLB and cache and the non-canonical address
 * causes a #GP fault when used by the INVLPG and CLFLUSH instructions.
 *
 * But in the common case we already have a canonical address. This code
 * will fix the top bit if needed and is a no-op otherwise.
 */
static inline unsigned long fix_addr(unsigned long addr)
{
#ifdef CONFIG_X86_64
        return (long)(addr << 1) >> 1;
#else
        return addr;
#endif
}

static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
{
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[idx];

                if (unlikely(PageHighMem(page)))
                        return 0;

                return (unsigned long)page_address(page);
        }

        if (cpa->flags & CPA_ARRAY)
                return cpa->vaddr[idx];

        return *cpa->vaddr + idx * PAGE_SIZE;
}

/*
 * Flushing functions
 */

static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
        const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
        void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
        void *vend = vaddr + size;

        if (p >= vend)
                return;

        for (; p < vend; p += clflush_size)
                clflushopt(p);
}

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
 * SFENCE to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        mb();
        clflush_cache_range_opt(vaddr, size);
        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
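
/*
 * Usage sketch (illustrative, not taken from a real caller): a driver
 * that fills a descriptor through a cached mapping and needs it visible
 * to a non-coherent device can flush exactly that byte range:
 *
 *      memcpy(ring_slot, &desc, sizeof(desc));
 *      clflush_cache_range(ring_slot, sizeof(desc));
 *
 * "ring_slot" and "desc" are hypothetical. The mb() fences above order
 * the CLFLUSHOPTs against surrounding loads and stores, so the caller
 * does not need extra barriers around the call.
 */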

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_invalidate_pmem(void *addr, size_t size)
{
        clflush_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around Errata in early athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_tlb(void *data)
{
        struct cpa_data *cpa = data;
        unsigned int i;

        for (i = 0; i < cpa->numpages; i++)
                flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i)));
}

static void cpa_flush(struct cpa_data *data, int cache)
{
        struct cpa_data *cpa = data;
        unsigned int i;

        BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

        if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
                cpa_flush_all(cache);
                return;
        }

        if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling)
                flush_tlb_all();
        else
                on_each_cpu(__cpa_flush_tlb, cpa, 1);

        if (!cache)
                return;

        mb();
        for (i = 0; i < cpa->numpages; i++) {
                unsigned long addr = __cpa_addr(cpa, i);
                unsigned int level;

                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE);
        }
        mb();
}

static bool overlaps(unsigned long r1_start, unsigned long r1_end,
                     unsigned long r2_start, unsigned long r2_end)
{
        return (r1_start <= r2_end && r1_end >= r2_start) ||
                (r2_start <= r1_end && r2_end >= r1_start);
}

#ifdef CONFIG_PCI_BIOS
/*
 * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS
 * based config access (CONFIG_PCI_GOBIOS) support.
 */
#define BIOS_PFN        PFN_DOWN(BIOS_BEGIN)
#define BIOS_PFN_END    PFN_DOWN(BIOS_END - 1)

static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
        if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END))
                return _PAGE_NX;
        return 0;
}
#else
static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn)
{
        return 0;
}
#endif

/*
 * The .rodata section needs to be read-only. Using the pfn catches all
 * aliases.  This also includes __ro_after_init, so do not enforce until
 * kernel_set_to_readonly is true.
 */
static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn)
{
        unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata));

        /*
         * Note: __end_rodata is page aligned and not inclusive, so
         * subtract 1 to get the last enforced PFN in the rodata area.
         */
        epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1;

        if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro))
                return _PAGE_RW;
        return 0;
}

/*
 * Protect kernel text against becoming non executable by forbidding
 * _PAGE_NX.  This protects only the high kernel mapping (_text to
 * _etext) out of which the kernel actually executes.  Do not protect
 * the low mapping.
 *
 * This does not cover __inittext since that is gone after boot.
 */
static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end)
{
        unsigned long t_end = (unsigned long)_etext - 1;
        unsigned long t_start = (unsigned long)_text;

        if (overlaps(start, end, t_start, t_end))
                return _PAGE_NX;
        return 0;
}

#if defined(CONFIG_X86_64)
/*
 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
 * kernel text mappings for the large page aligned text, rodata sections
 * will be always read-only. For the kernel identity mappings covering the
 * holes caused by this alignment can be anything that user asks.
 *
 * This will preserve the large page mappings for kernel text/data at no
 * extra cost.
 */
static pgprotval_t protect_kernel_text_ro(unsigned long start,
                                          unsigned long end)
{
        unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1;
        unsigned long t_start = (unsigned long)_text;
        unsigned int level;

        if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end))
                return 0;
        /*
         * Forbid RW only when the existing mapping is a large page:
         * filtering out the RW request preserves the large text/rodata
         * mapping instead of splitting it.  If the range was already
         * split down to 4k pages, the 4k protections are handled by the
         * regular static protection checks and nothing needs to be
         * enforced here.
         */
        if (lookup_address(start, &level) && (level != PG_LEVEL_4K))
                return _PAGE_RW;
        return 0;
}
#else
static pgprotval_t protect_kernel_text_ro(unsigned long start,
                                          unsigned long end)
{
        return 0;
}
#endif

static inline bool conflicts(pgprot_t prot, pgprotval_t val)
{
        return (pgprot_val(prot) & ~val) != pgprot_val(prot);
}

static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
                                  unsigned long start, unsigned long end,
                                  unsigned long pfn, const char *txt)
{
        static const char *lvltxt[] = {
                [CPA_CONFLICT]  = "conflict",
                [CPA_PROTECT]   = "protect",
                [CPA_DETECT]    = "detect",
        };

        if (warnlvl > cpa_warn_level || !conflicts(prot, val))
                return;

        pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n",
                lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot),
                (unsigned long long)val);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
                                          unsigned long pfn, unsigned long npg,
                                          unsigned long lpsize, int warnlvl)
{
        pgprotval_t forbidden, res;
        unsigned long end;

        /*
         * There is no point in checking RW/NX conflicts when the requested
         * mapping is setting the page !PRESENT.
         */
        if (!(pgprot_val(prot) & _PAGE_PRESENT))
                return prot;

        /* Operate on the virtual address */
        end = start + npg * PAGE_SIZE - 1;

        res = protect_kernel_text(start, end);
        check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
        forbidden = res;

        /*
         * Special case to preserve a large page. If the change spans the
         * full large page mapping then there is no point to split it
         * up.
         */
        if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
                res = protect_kernel_text_ro(start, end);
                check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
                forbidden |= res;
        }

        /* Check the PFN directly */
        res = protect_pci_bios(pfn, pfn + npg - 1);
        check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX");
        forbidden |= res;

        res = protect_rodata(pfn, pfn + npg - 1);
        check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO");
        forbidden |= res;

        return __pgprot(pgprot_val(prot) & ~forbidden);
}

/*
 * Lookup the page table entry for a virtual address in a specific pgd.
 * Return a pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
                             unsigned int *level)
{
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d))
                return NULL;

        *level = PG_LEVEL_512G;
        if (p4d_large(*p4d) || !p4d_present(*p4d))
                return (pte_t *)p4d;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        return lookup_address_in_pgd(pgd_offset_k(address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address);
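
/*
 * Usage sketch (illustrative): checking how a kernel virtual address is
 * currently mapped, e.g. whether it still sits under a 2M page:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && level == PG_LEVEL_2M)
 *              pr_info("%lx uses a 2M mapping\n", addr);
 *
 * Callers must handle a NULL return and, per the note above, must check
 * the level before treating the result as a 4k pte.
 */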

/*
 * Lookup the page table entry for a virtual address in a given mm. Return a
 * pointer to the entry and the level of the mapping.
 */
pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
                            unsigned int *level)
{
        return lookup_address_in_pgd(pgd_offset(mm, address), address, level);
}
EXPORT_SYMBOL_GPL(lookup_address_in_mm);

static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
                                  unsigned int *level)
{
        if (cpa->pgd)
                return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
                                             address, level);

        return lookup_address(address, level);
}

/*
 * Lookup the PMD entry for a virtual address. Return a pointer to the entry
 * and NULL if the entry does not exist.
 */
pmd_t *lookup_pmd_address(unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset_k(address);
        if (pgd_none(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
                return NULL;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud))
                return NULL;

        return pmd_offset(pud, address);
}

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems.  The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
        unsigned long virt_addr = (unsigned long)__virt_addr;
        phys_addr_t phys_addr;
        unsigned long offset;
        enum pg_level level;
        pte_t *pte;

        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);

        /*
         * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
         * before being left-shifted PAGE_SHIFT bits -- this trick is to
         * make 32-PAE kernel work correctly.
         */
        switch (level) {
        case PG_LEVEL_1G:
                phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
                offset = virt_addr & ~PUD_PAGE_MASK;
                break;
        case PG_LEVEL_2M:
                phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
                offset = virt_addr & ~PMD_PAGE_MASK;
                break;
        default:
                phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
                offset = virt_addr & ~PAGE_MASK;
        }

        return (phys_addr_t)(phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
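
/*
 * Usage sketch (illustrative): unlike __pa(), this also works on
 * vmalloc() and percpu addresses:
 *
 *      void *buf = vmalloc(PAGE_SIZE);         // __pa(buf) would be bogus
 *      phys_addr_t pa = slow_virt_to_phys(buf);
 *
 * The address must be mapped; an unmapped address trips the BUG_ON()
 * above.
 */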

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        p4d_t *p4d;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        p4d = p4d_offset(pgd, address);
                        pud = pud_offset(p4d, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot)
{
        /*
         * _PAGE_GLOBAL means "global page" for present PTEs.
         * But, it is also used to indicate _PAGE_PROTNONE
         * for non-present PTEs.
         *
         * This ensures that a _PAGE_GLOBAL PTE going from
         * present to non-present is not confused as
         * _PAGE_PROTNONE.
         */
        if (!(pgprot_val(prot) & _PAGE_PRESENT))
                pgprot_val(prot) &= ~_PAGE_GLOBAL;

        return prot;
}

static int __should_split_large_page(pte_t *kpte, unsigned long address,
                                     struct cpa_data *cpa)
{
        unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn;
        pgprot_t old_prot, new_prot, req_prot, chk_prot;
        pte_t new_pte, *tmp;
        enum pg_level level;

        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = _lookup_address_cpa(cpa, address, &level);
        if (tmp != kpte)
                return 1;

        switch (level) {
        case PG_LEVEL_2M:
                old_prot = pmd_pgprot(*(pmd_t *)kpte);
                old_pfn = pmd_pfn(*(pmd_t *)kpte);
                cpa_inc_2m_checked();
                break;
        case PG_LEVEL_1G:
                old_prot = pud_pgprot(*(pud_t *)kpte);
                old_pfn = pud_pfn(*(pud_t *)kpte);
                cpa_inc_1g_checked();
                break;
        default:
                return -EINVAL;
        }

        psize = page_level_size(level);
        pmask = page_level_mask(level);

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        lpaddr = (address + psize) & pmask;
        numpages = (lpaddr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         * Convert protection attributes to 4k-format, as cpa->mask* are set
         * up accordingly.
         */

        /* Clear PSE (aka _PAGE_PAT) and move PAT bit to correct position */
        req_prot = pgprot_large_2_4k(old_prot);

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * req_prot is in format of 4k pages. It must be converted to large
         * page format: the caching mode includes the PAT bit located at
         * different bit positions in the two formats.
         */
        req_prot = pgprot_4k_2_large(req_prot);
        req_prot = pgprot_clear_protnone_bits(req_prot);
        if (pgprot_val(req_prot) & _PAGE_PRESENT)
                pgprot_val(req_prot) |= _PAGE_PSE;

        /*
         * old_pfn points to the large page base pfn. So we need to add the
         * offset of the virtual address:
         */
        pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        /*
         * Calculate the large page base address and the number of 4K pages
         * in the large page
         */
        lpaddr = address & pmask;
        numpages = psize >> PAGE_SHIFT;

        /*
         * Sanity check that the existing mapping is correct versus the static
         * protections. static_protections() guards against !PRESENT, so no
         * extra conditional required here.
         */
        chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
                                      psize, CPA_CONFLICT);

        if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
                /*
                 * Split the large page and tell the split code to
                 * enforce static protections.
                 */
                cpa->force_static_prot = 1;
                return 1;
        }

        /*
         * Optimization: If the requested pgprot is the same as the current
         * pgprot, then the large page can be preserved and no updates are
         * required independent of alignment and length of the requested
         * range. The above already established that the current pgprot is
         * correct, which in consequence makes the requested pgprot correct
         * as well if it is the same. The static protection scan below will
         * not come to a different conclusion.
         */
        if (pgprot_val(req_prot) == pgprot_val(old_prot)) {
                cpa_inc_lp_sameprot(level);
                return 0;
        }

        /*
         * If the requested range does not cover the full page, split it up
         */
        if (address != lpaddr || cpa->numpages != numpages)
                return 1;

        /*
         * Check whether the requested pgprot is conflicting with a static
         * protection requirement in the large page.
         */
        new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
                                      psize, CPA_DETECT);

        /*
         * If there is a conflict, split the large page.
         *
         * There used to be a 4k wise evaluation trying really hard to
         * preserve the large pages, but experimentation has shown, that this
         * does not help at all. There might be corner cases which would
         * preserve one large page occasionally, but it's really not worth the
         * extra code and cycles for the common case.
         */
        if (pgprot_val(req_prot) != pgprot_val(new_prot))
                return 1;

        /* All checks passed. Update the large page mapping. */
        new_pte = pfn_pte(old_pfn, new_prot);
        __set_pmd_pte(kpte, address, new_pte);
        cpa->flags |= CPA_FLUSHTLB;
        cpa_inc_lp_preserved(level);
        return 0;
}

static int should_split_large_page(pte_t *kpte, unsigned long address,
                                   struct cpa_data *cpa)
{
        int do_split;

        if (cpa->force_split)
                return 1;

        spin_lock(&pgd_lock);
        do_split = __should_split_large_page(kpte, address, cpa);
        spin_unlock(&pgd_lock);

        return do_split;
}

static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
                          pgprot_t ref_prot, unsigned long address,
                          unsigned long size)
{
        unsigned int npg = PFN_DOWN(size);
        pgprot_t prot;

        /*
         * If should_split_large_page() discovered an inconsistent mapping,
         * remove the invalid protection in the split mapping.
         */
        if (!cpa->force_static_prot)
                goto set;

        /* Hand in lpsize = 0 to enforce the protection mechanism */
        prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);

        if (pgprot_val(prot) == pgprot_val(ref_prot))
                goto set;

        /*
         * If this is splitting a PMD, fix it up. PUD splits cannot be
         * fixed trivially as that would require to rescan the newly
         * installed PMD mappings after returning from split_large_page()
         * so lets punt it as long as it does not make a real difference.
         */
        if (size == PAGE_SIZE)
                ref_prot = prot;
        else
                pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
set:
        set_pte(pte, pfn_pte(pfn, ref_prot));
}

static int
__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
                   struct page *base)
{
        unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
        pte_t *pbase = (pte_t *)page_address(base);
        unsigned int i, level;
        pgprot_t ref_prot;
        pte_t *tmp;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = _lookup_address_cpa(cpa, address, &level);
        if (tmp != kpte) {
                spin_unlock(&pgd_lock);
                return 1;
        }

        paravirt_alloc_pte(&init_mm, page_to_pfn(base));

        switch (level) {
        case PG_LEVEL_2M:
                ref_prot = pmd_pgprot(*(pmd_t *)kpte);
                /*
                 * Clear PSE (aka _PAGE_PAT) and move
                 * PAT bit to correct position.
                 */
                ref_prot = pgprot_large_2_4k(ref_prot);
                ref_pfn = pmd_pfn(*(pmd_t *)kpte);
                lpaddr = address & PMD_MASK;
                lpinc = PAGE_SIZE;
                break;

        case PG_LEVEL_1G:
                ref_prot = pud_pgprot(*(pud_t *)kpte);
                ref_pfn = pud_pfn(*(pud_t *)kpte);
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                lpaddr = address & PUD_MASK;
                lpinc = PMD_SIZE;
                /*
                 * Clear the PSE flags if the PRESENT flag is not set
                 * otherwise pmd_present/pmd_huge will return true
                 * even on a non present pmd.
                 */
                if (!(pgprot_val(ref_prot) & _PAGE_PRESENT))
                        pgprot_val(ref_prot) &= ~_PAGE_PSE;
                break;

        default:
                spin_unlock(&pgd_lock);
                return 1;
        }

        ref_prot = pgprot_clear_protnone_bits(ref_prot);

        /*
         * Get the target pfn from the original entry:
         */
        pfn = ref_pfn;
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
                split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);

        if (virt_addr_valid(address)) {
                unsigned long pfn = PFN_DOWN(__pa(address));

                if (pfn_range_is_mapped(pfn, pfn + 1))
                        split_page_count(level);
        }

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Do a global flush tlb after splitting the large page
         * and before we do the actual change page attribute in the PTE.
         *
         * Without this, we violate the TLB application note, that says:
         * "The TLBs may contain both ordinary and large-page
         *  translations for a 4-KByte range of linear addresses. This
         *  may occur if software modifies the paging structures so that
         *  the page size used for the address range changes. If the two
         *  translations differ with respect to page frame or attributes
         *  (e.g., permissions), processor behavior is undefined and may
         *  be implementation-specific."
         *
         * We do this global tlb flush inside the cpa_lock, so that we
         * don't allow any other cpu, with stale tlb entries change the
         * page attribute in parallel, that also falls into the
         * just split large page entry.
         */
        flush_tlb_all();
        spin_unlock(&pgd_lock);

        return 0;
}

static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
                            unsigned long address)
{
        struct page *base;

        if (!debug_pagealloc_enabled())
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        if (!debug_pagealloc_enabled())
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        if (__split_large_page(cpa, kpte, address, base))
                __free_page(base);

        return 0;
}

static bool try_to_free_pte_page(pte_t *pte)
{
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++)
                if (!pte_none(pte[i]))
                        return false;

        free_page((unsigned long)pte);
        return true;
}

static bool try_to_free_pmd_page(pmd_t *pmd)
{
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++)
                if (!pmd_none(pmd[i]))
                        return false;

        free_page((unsigned long)pmd);
        return true;
}

static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, start);

        while (start < end) {
                set_pte(pte, __pte(0));

                start += PAGE_SIZE;
                pte++;
        }

        if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
                pmd_clear(pmd);
                return true;
        }
        return false;
}

static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
                              unsigned long start, unsigned long end)
{
        if (unmap_pte_range(pmd, start, end))
                if (try_to_free_pmd_page(pud_pgtable(*pud)))
                        pud_clear(pud);
}

static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, start);

        /*
         * Not on a 2MB page boundary?
         */
        if (start & (PMD_SIZE - 1)) {
                unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
                unsigned long pre_end = min_t(unsigned long, end, next_page);

                __unmap_pmd_range(pud, pmd, start, pre_end);

                start = pre_end;
                pmd++;
        }

        /*
         * Try to unmap in 2M chunks.
         */
        while (end - start >= PMD_SIZE) {
                if (pmd_large(*pmd))
                        pmd_clear(pmd);
                else
                        __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

                start += PMD_SIZE;
                pmd++;
        }

        /*
         * 4K leftovers?
         */
        if (start < end)
                return __unmap_pmd_range(pud, pmd, start, end);

        /*
         * Try again to free the PMD page if haven't succeeded above.
         */
        if (!pud_none(*pud))
                if (try_to_free_pmd_page(pud_pgtable(*pud)))
                        pud_clear(pud);
}

static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
{
        pud_t *pud = pud_offset(p4d, start);

        /*
         * Not on a GB page boundary?
         */
        if (start & (PUD_SIZE - 1)) {
                unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
                unsigned long pre_end = min_t(unsigned long, end, next_page);

                unmap_pmd_range(pud, start, pre_end);

                start = pre_end;
                pud++;
        }

        /*
         * Try to unmap in 1G chunks?
         */
        while (end - start >= PUD_SIZE) {

                if (pud_large(*pud))
                        pud_clear(pud);
                else
                        unmap_pmd_range(pud, start, start + PUD_SIZE);

                start += PUD_SIZE;
                pud++;
        }

        /*
         * 2M leftovers?
         */
        if (start < end)
                unmap_pmd_range(pud, start, end);

        /*
         * No need to try to free the PUD page because we'll free it in
         * populate_pgd's error path
         */
}

static int alloc_pte_page(pmd_t *pmd)
{
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
        if (!pte)
                return -1;

        set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
        return 0;
}

static int alloc_pmd_page(pud_t *pud)
{
        pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
        if (!pmd)
                return -1;

        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
        return 0;
}

static void populate_pte(struct cpa_data *cpa,
                         unsigned long start, unsigned long end,
                         unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, start);

        pgprot = pgprot_clear_protnone_bits(pgprot);

        while (num_pages-- && start < end) {
                set_pte(pte, pfn_pte(cpa->pfn, pgprot));

                start += PAGE_SIZE;
                cpa->pfn++;
                pte++;
        }
}

static long populate_pmd(struct cpa_data *cpa,
                         unsigned long start, unsigned long end,
                         unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
        long cur_pages = 0;
        pmd_t *pmd;
        pgprot_t pmd_pgprot;

        /*
         * Not on a 2M boundary?
         */
        if (start & (PMD_SIZE - 1)) {
                unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
                unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;

                pre_end = min_t(unsigned long, pre_end, next_page);
                cur_pages = (pre_end - start) >> PAGE_SHIFT;
                cur_pages = min_t(unsigned int, num_pages, cur_pages);

                /*
                 * Need a PTE page?
                 */
                pmd = pmd_offset(pud, start);
                if (pmd_none(*pmd))
                        if (alloc_pte_page(pmd))
                                return -1;

                populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);

                start = pre_end;
        }

        /*
         * We mapped them all?
         */
        if (num_pages == cur_pages)
                return cur_pages;

        pmd_pgprot = pgprot_4k_2_large(pgprot);

        while (end - start >= PMD_SIZE) {

                /*
                 * We cannot use a 1G page so allocate a PMD page if needed.
                 */
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                pmd = pmd_offset(pud, start);

                set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
                                        canon_pgprot(pmd_pgprot))));

                start     += PMD_SIZE;
                cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
                cur_pages += PMD_SIZE >> PAGE_SHIFT;
        }

        /*
         * Map trailing 4K pages.
         */
        if (start < end) {
                pmd = pmd_offset(pud, start);
                if (pmd_none(*pmd))
                        if (alloc_pte_page(pmd))
                                return -1;

                populate_pte(cpa, start, end, num_pages - cur_pages,
                             pmd, pgprot);
        }
        return num_pages;
}

static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
                        pgprot_t pgprot)
{
        pud_t *pud;
        unsigned long end;
        long cur_pages = 0;
        pgprot_t pud_pgprot;

        end = start + (cpa->numpages << PAGE_SHIFT);

        /*
         * Not on a Gb page boundary? => map everything up to it with
         * smaller pages.
         */
        if (start & (PUD_SIZE - 1)) {
                unsigned long pre_end;
                unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

                pre_end   = min_t(unsigned long, end, next_page);
                cur_pages = (pre_end - start) >> PAGE_SHIFT;
                cur_pages = min_t(int, (int)cpa->numpages, cur_pages);

                pud = pud_offset(p4d, start);

                /*
                 * Need a PMD page?
                 */
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
                                         pud, pgprot);
                if (cur_pages < 0)
                        return cur_pages;

                start = pre_end;
        }

        /* We mapped them all? */
        if (cpa->numpages == cur_pages)
                return cur_pages;

        pud = pud_offset(p4d, start);
        pud_pgprot = pgprot_4k_2_large(pgprot);

        /*
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
                set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
                                        canon_pgprot(pud_pgprot))));

                start     += PUD_SIZE;
                cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
                cur_pages += PUD_SIZE >> PAGE_SHIFT;
                pud++;
        }

        /* Map trailing leftover */
        if (start < end) {
                long tmp;

                pud = pud_offset(p4d, start);
                if (pud_none(*pud))
                        if (alloc_pmd_page(pud))
                                return -1;

                tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
                                   pud, pgprot);
                if (tmp < 0)
                        return cur_pages;

                cur_pages += tmp;
        }
        return cur_pages;
}

/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
        pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
        pud_t *pud = NULL;      /* shut up gcc */
        p4d_t *p4d;
        pgd_t *pgd_entry;
        long ret;

        pgd_entry = cpa->pgd + pgd_index(addr);

        if (pgd_none(*pgd_entry)) {
                p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
                if (!p4d)
                        return -1;

                set_pgd(pgd_entry, __pgd(__pa(p4d) | _KERNPG_TABLE));
        }

        /*
         * Allocate a PUD page and hand it down for mapping.
         */
        p4d = p4d_offset(pgd_entry, addr);
        if (p4d_none(*p4d)) {
                pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        return -1;

                set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
        }

        pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(pgprot) |=  pgprot_val(cpa->mask_set);

        ret = populate_pud(cpa, addr, p4d, pgprot);
        if (ret < 0) {
                /*
                 * Leave the PUD page in place in case some other CPU or thread
                 * already found it, but remove any useless entries we just
                 * added to it.
                 */
                unmap_pud_range(p4d, addr,
                                addr + (cpa->numpages << PAGE_SHIFT));
                return ret;
        }

        cpa->numpages = ret;
        return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        if (cpa->pgd) {
                /*
                 * Right now, we only execute this code path when mapping
                 * some pages in the EFI page table.
                 */
                return populate_pgd(cpa, vaddr);
        }

        /*
         * Ignore all non primary paths.
         */
        if (!primary) {
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;

        } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
                /* Faults in the highmap are OK, so do not warn: */
                return -EFAULT;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        address = __cpa_addr(cpa, cpa->curpage);
repeat:
        kpte = _lookup_address_cpa(cpa, address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (pte_none(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                cpa_inc_4k_install();
                /* Hand in lpsize = 0 to enforce the protection mechanism */
                new_prot = static_protections(new_prot, address, pfn, 1, 0,
                                              CPA_PROTECT);

                new_prot = pgprot_clear_protnone_bits(new_prot);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change it's attributes
                 * not the memory it points to
                 */
                new_pte = pfn_pte(pfn, new_prot);
                cpa->pfn = pfn;

                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = should_split_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. The cpa->numpages is adjusted accordingly.
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(cpa, kpte, address);
        if (!err)
                goto repeat;

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        vaddr = __cpa_addr(cpa, cpa->curpage);
        if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
                alias_cpa.curpage = 0;

                cpa->force_flush_all = 1;

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            __cpa_pfn_in_highmap(cpa->pfn)) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
                alias_cpa.curpage = 0;

                cpa->force_flush_all = 1;

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        unsigned long numpages = cpa->numpages;
        unsigned long rempages = numpages;
        int ret = 0;

        while (rempages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = rempages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc_enabled())
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc_enabled())
                        spin_unlock(&cpa_lock);
                if (ret)
                        goto out;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                goto out;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > rempages || !cpa->numpages);
                rempages -= cpa->numpages;
                cpa->curpage += cpa->numpages;
        }

out:
        /* Restore the original numpages */
        cpa->numpages = numpages;
        return ret;
}

static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        memset(&cpa, 0, sizeof(cpa));

        /*
         * Check, if we are requested to set a not supported
         * feature.  Clearing non-supported features is OK.
         */
        mask_set = canon_pgprot(mask_set);

        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
        /* Has caller explicitly disabled alias checking? */
        if (in_flag & CPA_NO_CHECK_ALIAS)
                checkalias = 0;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = !!pgprot2cachemode(mask_set);

        /*
         * On error; flush everything to be sure.
         */
        if (ret) {
                cpa_flush_all(cache);
                goto out;
        }

        cpa_flush(&cpa, cache);
out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}

/*
 * __set_memory_prot is an internal helper for callers that have been passed
 * a pgprot_t value from upper layers and a reservation has already been taken.
 * If you want to set the pgprot to a specific page protocol, use the
 * set_memory_xx() functions.
 */
int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot)
{
        return change_page_attr_set_clr(&addr, numpages, prot,
                                        __pgprot(~pgprot_val(prot)), 0, 0,
                                        NULL);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap()
         * If you really need strong UC use ioremap_uc(), but note
         * that you cannot override strong UC by UC MINUS on the same
         * address.
         */
        return change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                    0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap()
         */
        ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_MODE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);
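
/*
 * Usage sketch (illustrative): a caller that temporarily needs an
 * uncached view of a kernel buffer pairs set_memory_uc() with
 * set_memory_wb(), which also releases the memtype reservation taken
 * above:
 *
 *      if (!set_memory_uc(addr, numpages)) {
 *              // ... access the buffer uncached ...
 *              set_memory_wb(addr, numpages);
 *      }
 *
 * Leaving the range UC would keep both the attribute and the memtype
 * entry alive past the user's lifetime, so the calls must balance.
 */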

int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        ret = change_page_attr_set(&addr, numpages,
                                   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
                                   0);
        if (!ret) {
                ret = change_page_attr_set_clr(&addr, numpages,
                                               cachemode2pgprot(_PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
        return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        ret = memtype_reserve(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_MODE_WC, NULL);
        if (ret)
                return ret;

        ret = _set_memory_wc(addr, numpages);
        if (ret)
                memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

        return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wt(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
}

int _set_memory_wb(unsigned long addr, int numpages)
{
        /* WB cache mode is hard wired to all cache attribute bits being 0 */
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
        int ret;

        ret = _set_memory_wb(addr, numpages);
        if (ret)
                return ret;

        memtype_free(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_nx(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
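
/*
 * Usage sketch (illustrative): write-protecting a page-aligned kernel
 * buffer and restoring it, where "buf" is a hypothetical page-aligned
 * allocation spanning a single page:
 *
 *      set_memory_ro((unsigned long)buf, 1);   // stray writes now fault
 *      // ... read-only phase ...
 *      set_memory_rw((unsigned long)buf, 1);   // writable again
 */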

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_np_noalias(unsigned long addr, int numpages)
{
        int cpa_flags = CPA_NO_CHECK_ALIAS;

        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(_PAGE_PRESENT), 0,
                                        cpa_flags, NULL);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

int set_memory_nonglobal(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_GLOBAL), 0);
}

int set_memory_global(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_GLOBAL), 0);
}

/*
 * __set_memory_enc_pgtable() is used for the hypervisors that get
 * informed about "encryption" status via page tables.
 */
static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
{
        struct cpa_data cpa;
        int ret;

        /* Should not be working on unaligned addresses */
        if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
                addr &= PAGE_MASK;

        memset(&cpa, 0, sizeof(cpa));
        cpa.vaddr = &addr;
        cpa.numpages = numpages;
        cpa.mask_set = enc ? __pgprot(_PAGE_ENC) : __pgprot(0);
        cpa.mask_clr = enc ? __pgprot(0) : __pgprot(_PAGE_ENC);
        cpa.pgd = init_mm.pgd;

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();
        vm_unmap_aliases();

        /*
         * Before changing the encryption attribute, we need to flush caches.
         */
        cpa_flush(&cpa, !this_cpu_has(X86_FEATURE_SME_COHERENT));

        ret = __change_page_attr_set_clr(&cpa, 1);

        /*
         * After changing the encryption attribute, we need to flush TLBs again
         * in case any speculative TLB caching occurred (but no need to flush
         * caches again).  We could just use cpa_flush_all(), but in case TLB
         * flushing gets optimized in the cpa_flush() path use the same logic
         * as above.
         */
        cpa_flush(&cpa, 0);

        /*
         * Notify hypervisor that a given memory range is mapped encrypted
         * or decrypted.
         */
        notify_range_enc_status_changed(addr, numpages, enc);

        return ret;
}

static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
        if (hv_is_isolation_supported())
                return hv_set_mem_host_visibility(addr, numpages, !enc);

        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                return __set_memory_enc_pgtable(addr, numpages, enc);

        return 0;
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
        return __set_memory_enc_dec(addr, numpages, true);
}
EXPORT_SYMBOL_GPL(set_memory_encrypted);

int set_memory_decrypted(unsigned long addr, int numpages)
{
        return __set_memory_enc_dec(addr, numpages, false);
}
EXPORT_SYMBOL_GPL(set_memory_decrypted);
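
/*
 * Usage sketch (illustrative): a confidential guest sharing a page with
 * the host decrypts it first and re-encrypts it before freeing, so a
 * plaintext page is never returned to the allocator:
 *
 *      unsigned long va = __get_free_page(GFP_KERNEL);
 *
 *      if (va && !set_memory_decrypted(va, 1)) {
 *              // ... exchange data with the host ...
 *              set_memory_encrypted(va, 1);
 *      }
 *      free_page(va);
 */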

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

static int _set_pages_array(struct page **pages, int numpages,
                            enum page_cache_mode new_type)
{
        unsigned long start;
        unsigned long end;
        enum page_cache_mode set_type;
        int i;
        int free_idx;
        int ret;

        for (i = 0; i < numpages; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (memtype_reserve(start, end, new_type, NULL))
                        goto err_out;
        }

        /* If WC, set to UC- first and then WC */
        set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
                                _PAGE_CACHE_MODE_UC_MINUS : new_type;

        ret = cpa_set_pages_array(pages, numpages,
                                  cachemode2pgprot(set_type));
        if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(NULL, numpages,
                                               cachemode2pgprot(
                                                _PAGE_CACHE_MODE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_PAGES_ARRAY, pages);
        if (ret)
                goto err_out;
        return 0; /* Success */
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                memtype_free(start, end);
        }
        return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int numpages)
{
        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int numpages)
{
        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wt(struct page **pages, int numpages)
{
        return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_pages_array_wt);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int numpages)
{
        int retval;
        unsigned long start;
        unsigned long end;
        int i;

        /* WB cache mode is hard wired to all cache attribute bits being 0 */
        retval = cpa_clear_pages_array(pages, numpages,
                                       __pgprot(_PAGE_CACHE_MASK));
        if (retval)
                return retval;

        for (i = 0; i < numpages; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                memtype_free(start, end);
        }

        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .pgd = NULL,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0),
                                .flags = 0};

        /*
         * No alias checking needed for setting present flag. otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .pgd = NULL,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .flags = 0};

        /*
         * No alias checking needed for setting not present flag. otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

int set_direct_map_invalid_noflush(struct page *page)
{
        return __set_pages_np(page, 1);
}

int set_direct_map_default_noflush(struct page *page)
{
        return __set_pages_p(page, 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages for identity mappings are not used at boot time
         * and hence no memory allocations during large page split.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu.
         * Preemption needs to be disabled around __flush_tlb_all() due to
         * CR3 reload in __native_flush_tlb().
         */
        preempt_disable();
        __flush_tlb_all();
        preempt_enable();

        arch_flush_lazy_mmu_mode();
}
#endif

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags)
{
        int retval = -EINVAL;

        struct cpa_data cpa = {
                .vaddr = &address,
                .pfn = pfn,
                .pgd = pgd,
                .numpages = numpages,
                .mask_set = __pgprot(0),
                .mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
                .flags = 0,
        };

        WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

        if (!(__supported_pte_mask & _PAGE_NX))
                goto out;

        if (!(page_flags & _PAGE_ENC))
                cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);

        cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);

        retval = __change_page_attr_set_clr(&cpa, 0);
        __flush_tlb_all();

out:
        return retval;
}

/*
 * __flush_tlb_all() flushes mappings only on current CPU and hence this
 * function shouldn't be used in an SMP environment. Presently, it's used only
 * during boot (way before smp_init()) by EFI subsystem and hence is ok.
 */
int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
                                     unsigned long numpages)
{
        int retval;

        /*
         * The typical sequence for unmapping is to find a pte through
         * lookup_address_in_pgd() (ideally, it should never return NULL
         * because the address is already mapped) and change its protections.
         * As pfn is the *target* of a mapping, it's not useful while
         * unmapping.
         */
        struct cpa_data cpa = {
                .vaddr = &address,
                .pfn = 0,
                .pgd = pgd,
                .numpages = numpages,
                .mask_set = __pgprot(0),
                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                .flags = 0,
        };

        WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");

        retval = __change_page_attr_set_clr(&cpa, 0);
        __flush_tlb_all();

        return retval;
}

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include them directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "cpa-test.c"
#endif