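/*
 * Handle changes of page attributes (set_memory_* / set_pages_*) on the
 * kernel direct mapping and the high kernel mapping.
 */
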
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

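/*
 * The current flushing context - we pass it instead of five arguments:
 */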
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

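/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that no other cpu with stale large tlb entries changes
 * the page attributes in parallel to some other cpu splitting a large page
 * entry along with changing the attribute.
 */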
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        unsigned long flags;

        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G: %8lu kB\n",
                                direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

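/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */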
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around Errata in early athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * Flush the whole TLB on this CPU; the cache lines are flushed
         * separately by the caller, if needed.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

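/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */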
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

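/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: the returned pointer may also be a pud or pmd entry, either when
 * the entry is a large mapping or when it is not present; callers must
 * check *level and the present bit before walking further.
 */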
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

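/*
 * Set the new pmd in all the pgds we know about:
 */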
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

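/*
 * Check whether the requested pgprot change can be applied to the large
 * (2M/1G) page mapping @address without splitting it. Returns 1 when the
 * page must be split, 0 when it could be preserved or left untouched, or
 * a negative error code.
 */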
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * pte_pfn(old_pte) is the large page base pfn, so add the
         * offset of the virtual address within the large page:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether static_protections()
         * requires a different pgprot for one of the pages in the range
         * we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned or the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

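/*
 * Split one large (2M/1G) mapping into entries of the next smaller level,
 * preserving the original protections in the new page table page.
 */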
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
         * If we ever want to utilize the PAT bit, we need to
         * update this function to make sure it's converted from
         * bit 12 to bit 7 when we cross from the 2MB level to
         * the 4K level:
         */
        WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
            address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);

#ifdef CONFIG_X86_64
        if (address >= (unsigned long)__va(1UL<<32) &&
            address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);
#endif

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();

        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base)
                __free_page(base);
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

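/*
 * Called when no (or an empty) PTE was found for the address being
 * changed: holes in the kernel identity mapping are tolerated, anything
 * else is reported as a bug.
 */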
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

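/*
 * Change the attributes of the page at the current cpa address. Large
 * mappings are preserved in place when possible, otherwise split;
 * cpa->numpages is set to the number of pages actually handled.
 */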
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                address = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;

                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages has been updated to the number of
         * pages handled within the large page.
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, which says
                 *   "The TLBs may contain both ordinary and large-page
                 *    translations for a 4-KByte range of linear addresses. This
                 *    may occur if software modifies the paging structures so that
                 *    the page size used for the address range changes. If the two
                 *    translations differ with respect to page frame or attributes
                 *    (e.g., permissions), processor behavior is undefined and may
                 *    be implementation-specific."
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

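/*
 * A physical page can be mapped more than once (the direct mapping and,
 * on 64-bit, the high kernel mapping). Propagate the attribute change to
 * those alias mappings as well.
 */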
static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (cpa->pfn >= max_pfn_mapped)
                return 0;

#ifdef CONFIG_X86_64
        if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
                return 0;
#endif
        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                     PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                /* for array changes, we can't use the large page preservation */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;

        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

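/*
 * Common backend for the set_memory_* and set_pages_* interfaces: apply
 * mask_set/mask_clr to numpages pages starting at *addr (or to the given
 * pages array) and do the required TLB and cache flushing.
 */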
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;
        unsigned long baddr = 0;

        /*
         * Check, if we are requested to change a not supported
         * feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
                /*
                 * Save address for cache flush. *addr is modified in the
                 * call to __change_page_attr_set_clr() below.
                 */
                baddr = *addr;
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it to
         * avoid the wbinvd. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
                        cpa_flush_range(baddr, numpages, cache);
        } else
                cpa_flush_all(cache);

out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        int i, j;
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                      _PAGE_CACHE_UC_MINUS, NULL);
                if (ret)
                        goto out_free;
        }

        ret = change_page_attr_set(addr, addrinarray,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 1);
        if (ret)
                goto out_free;

        return 0;

out_free:
        for (j = 0; j < i; j++)
                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

        return ret;
}
EXPORT_SYMBOL(set_memory_array_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;
        unsigned long addr_copy = addr;

        ret = change_page_attr_set(&addr, numpages,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
        if (!ret) {
                ret = change_page_attr_set_clr(&addr_copy, numpages,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
        return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        if (!pat_enabled)
                return set_memory_uc(addr, numpages);

        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_WC, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_wc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
        int ret;

        ret = _set_memory_wb(addr, numpages);
        if (ret)
                return ret;

        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
        int i;
        int ret;

        ret = change_page_attr_clear(addr, addrinarray,
                                     __pgprot(_PAGE_CACHE_MASK), 1);
        if (ret)
                return ret;

        for (i = 0; i < addrinarray; i++)
                free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

        return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        unsigned long start;
        unsigned long end;
        int i;
        int free_idx;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                        goto err_out;
        }

        if (cpa_set_pages_array(pages, addrinarray,
                                __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
                return 0;
        }
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        int retval;
        unsigned long start;
        unsigned long end;
        int i;

        retval = cpa_clear_pages_array(pages, addrinarray,
                                       __pgprot(_PAGE_CACHE_MASK));
        if (retval)
                return retval;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }

        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0),
                                .flags = 0};

        /*
         * No alias checking needed for setting present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .flags = 0};

        /*
         * No alias checking needed for setting not present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages for identity mappings are not used at boot time
         * and hence no memory allocations during large page split.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu:
         */
        __flush_tlb_all();
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

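/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include them directly here.
 */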
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif