/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly access the hypervisor-provided
 * pagetables (with restrictions).  All Xen pagetable pages are marked
 * read-only in the guest: the hypervisor traps, validates and applies
 * every pagetable update before letting it take effect.  Because
 * trapping each write is expensive, updates are batched wherever
 * possible through the multicall interface (see multicalls.h).
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value and know it is self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
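
/*
 * Illustrative sketch (not in the original source): a vmalloc'd buffer
 * is not covered by the linear map, so arbitrary_virt_to_machine()
 * takes the pagetable-walk path above, while a lowmem page uses the
 * fast p2m lookup.  Assumed context: an ordinary PV kernel caller.
 *
 *	void *vm = vmalloc(PAGE_SIZE);			// slow path
 *	void *lm = (void *)__get_free_page(GFP_KERNEL);	// fast path
 *	xmaddr_t ma_vm = arbitrary_virt_to_machine(vm);
 *	xmaddr_t ma_lm = arbitrary_virt_to_machine(lm);
 */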

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		/* Extend the pending mmu_update multicall in place. */
		mcs.mc->args[1]++;
	} else {
		/* No pending mmu_update call to extend; start a new one. */
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}
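
/*
 * Illustrative note (not in the original source): xen_extend_mmu_update()
 * is what makes runs of PTE writes cheap.  Under PARAVIRT_LAZY_MMU a
 * sequence of updates coalesces into one mmu_update multicall, roughly:
 *
 *	xen_mc_batch();
 *	for (i = 0; i < n; i++) {
 *		u.ptr = virt_to_machine(&ptes[i]).maddr | MMU_NORMAL_PT_UPDATE;
 *		u.val = pte_val_ma(new[i]);
 *		xen_extend_mmu_update(&u);	// bumps arg count, no new call
 *	}
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);	// one hypercall for the lot
 *
 * The names ptes[]/new[] are hypothetical; the rest is the API used
 * throughout this file.
 */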

static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write, but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
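
/*
 * Illustrative sketch (not in the original source): the start/commit
 * pair above backs ptep_modify_prot_start()/..._commit(), letting Xen
 * preserve hardware Accessed/Dirty bits set between the read and the
 * write.  Generic mm code uses it roughly like this:
 *
 *	pte_t old = ptep_modify_prot_start(mm, addr, ptep);
 *	pte_t new = pte_wrprotect(old);		// any modification
 *	ptep_modify_prot_commit(mm, addr, ptep, new);
 *
 * MMU_PT_UPDATE_PRESERVE_AD asks the hypervisor to OR in any A/D bits
 * the hardware set concurrently, rather than clobbering them.
 */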

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * This test must come _after_ the
			 * INVALID_P2M_ENTRY check, since
			 * INVALID_P2M_ENTRY & IDENTITY_FRAME_BIT
			 * resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}
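
/*
 * Worked example (not in the original source, numbers made up): with
 * 4K pages, a pte value of 0x1b3067 has pfn 0x1b3 and flags 0x067
 * (PRESENT|RW|USER|ACCESSED|DIRTY).  If the p2m says pfn 0x1b3 lives
 * in machine frame 0x7a2, pte_pfn_to_mfn() yields 0x7a2067.
 * FOREIGN_FRAME_BIT and IDENTITY_FRAME_BIT ride in the high bits of
 * the p2m entry, which is why they are masked off before the mfn is
 * shifted back into the pte.
 */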

static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is a MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;
#if 0
	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}
#endif
	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

static pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI.  It differs from the kernel
 * default in slot 1: 0x0007010600070106 decodes (low byte first) to
 * WB, WC, UC-, UC, repeated for slots 4-7, whereas native Linux would
 * program WT in slot 1.  The kernel is expected to run with exactly
 * this layout, so any other value is flagged.
 */
void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}
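
/*
 * Decode sketch (not in the original source): each PAT slot is one
 * byte of the MSR, with 0x00=UC, 0x01=WC, 0x04=WT, 0x06=WB, 0x07=UC-.
 *
 *	0x 00 07 01 06 00 07 01 06	(left = PAT7, right = PAT0)
 *	   PAT7/PAT3 = 0x00 UC, PAT6/PAT2 = 0x07 UC-,
 *	   PAT5/PAT1 = 0x01 WC, PAT4/PAT0 = 0x06 WB
 *
 * Native Linux programs 0x04 (WT) in slot 1, which is why the
 * (disabled) code in xen_pte_val()/xen_make_pte() would translate
 * between Linux's WT encoding and Xen's WC slot.
 */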

static pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.).
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}
#endif
	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

static pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

static pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

static pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
static pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

static pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}
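
/*
 * Illustrative note (not in the original source): 64-bit PV guests
 * keep separate kernel and user toplevel pagetables.  The user pgd
 * page is stashed in page->private of the kernel pgd's struct page
 * (see xen_pgd_alloc() below), so given &pgd[i] this helper returns
 * &user_pgd[i], or NULL for entries above USER_LIMIT which exist only
 * in the kernel half.  Conceptually:
 *
 *	pgd_t *kpgd = mm->pgd;
 *	pgd_t *upgd = xen_get_user_pgd(kpgd);	// same index, user table
 */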

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
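
/*
 * Illustrative note (not in the original source): both the pin and
 * unpin paths are just walks with different callbacks, e.g.
 *
 *	__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
 *	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
 *
 * The callback's int return accumulates into "flush", which the pin
 * path uses to decide whether kmap_flush_unused() is needed before
 * the pagetable pages can be made read-only.
 */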

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}
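
/*
 * Illustrative note (not in the original source): pinning is the
 * contract that lets Xen validate a pagetable once instead of on
 * every use.  The lifecycle driven by the pv_mmu_ops below is:
 *
 *	xen_activate_mm()/xen_dup_mmap()  ->  xen_pgd_pin(mm)
 *	... all PTE updates now go via (batched) hypercalls ...
 *	xen_exit_mmap()                   ->  xen_pgd_unpin(mm)
 *
 * After unpinning, the pagetable pages are ordinary RW memory again
 * and can be freed by the generic mm code.
 */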

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't matter.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as its created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO, to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/*
	 * It's possible that a vcpu may have a stale reference to our
	 * cr3, because it's in lazy mode and hasn't yet flushed its
	 * set of pending hypercalls.  In this case, we can look at
	 * its actual current cr3 value, and force it to flush if
	 * needed.
	 */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetable, which prevents it
 * from being swapped or freed.  Before the pagetable pages can be
 * returned to the allocator on exit, the pagetable must be unpinned,
 * and before that no processor may still have it loaded in cr3 (a
 * stale reference is possible, e.g. when a vcpu is in lazy mode and
 * hasn't flushed its pending hypercalls).  So first repoint any such
 * cr3 at swapper_pg_dir, then unpin; the memory is then freed as
 * normal pages.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void __init xen_pagetable_setup_start(pgd_t *base)
{
}

static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
	/* reserve the range used */
	native_pagetable_reserve(start, end);

	/* set as RW the rest */
	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
	       PFN_PHYS(pgt_buf_top));
	while (end < PFN_PHYS(pgt_buf_top)) {
		make_lowmem_page_readwrite(__va(end));
		end += PAGE_SIZE;
	}
}

static void xen_post_allocator_init(void);

static void __init xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
	xen_post_allocator_init();
}

static void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return this_cpu_read(xen_vcpu_info.arch.cr2);
}

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_single(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct {
		struct mmuext_op op;
#ifdef CONFIG_SMP
		DECLARE_BITMAP(mask, num_processors);
#else
		DECLARE_BITMAP(mask, NR_CPUS);
#endif
	} *args;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();	/* disables interrupts */

	/* Update while interrupts are disabled, so its atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
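
/*
 * Illustrative note (not in the original source): a context switch on
 * a 64-bit PV guest thus issues, in one multicall batch:
 *
 *	MMUEXT_NEW_BASEPTR      <- kernel toplevel (mfn of cr3)
 *	MMUEXT_NEW_USER_BASEPTR <- user toplevel (or 0 if none)
 *
 * xen_cr3 is updated eagerly so this vcpu always sees a consistent
 * value, while xen_current_cr3 is only updated by the
 * set_current_cr3() callback once the hypercall has actually been
 * submitted; xen_drop_mm_ref() relies on that distinction when
 * deciding which cpus still reference a pagetable.
 */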

static int xen_pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = mm->pgd;
	int ret = 0;

	BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
	{
		struct page *page = virt_to_page(pgd);
		pgd_t *user_pgd;

		BUG_ON(page->private != 0);

		ret = -ENOMEM;

		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		page->private = (unsigned long)user_pgd;

		if (user_pgd != NULL) {
			user_pgd[pgd_index(VSYSCALL_START)] =
				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
			ret = 0;
		}

		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
	}
#endif

	return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
	pgd_t *user_pgd = xen_get_user_pgd(pgd);

	if (user_pgd)
		free_page((unsigned long)user_pgd);
#endif
}

#ifdef CONFIG_X86_32
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}
#else /* CONFIG_X86_64 */
static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	/*
	 * If the new pfn is within the range of the newly allocated
	 * kernel pagetable, and it isn't being mapped into an
	 * early_ioremap fixmap slot as a freshly allocated page, make
	 * sure it is RO.
	 */
	if (((!is_early_ioremap_ptep(ptep) &&
	      pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
		pte = pte_wrprotect(pte);

	return pte;
}
#endif /* CONFIG_X86_64 */

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive.  At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	if (pte_mfn(pte) != INVALID_P2M_ENTRY)
		pte = mask_rw_pte(ptep, pte);
	else
		pte = __pte_ma(0);

	native_set_pte(ptep, pte);
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
	pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct multicall_space mcs;
	struct mmuext_op *op;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = cmd;
	op->arg1.mfn = pfn_to_mfn(pfn);

	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	struct multicall_space mcs;
	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);

	mcs = __xen_mc_entry(0);
	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
				pfn_pte(pfn, prot), 0);
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
				    unsigned level)
{
	bool pinned = PagePinned(virt_to_page(mm->pgd));

	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);

	if (pinned) {
		struct page *page = pfn_to_page(pfn);

		SetPagePinned(page);

		if (!PageHighMem(page)) {
			xen_mc_batch();

			__set_pfn_prot(pfn, PAGE_KERNEL_RO);

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		} else {
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
		}
	}
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);
	bool pinned = PagePinned(page);

	trace_xen_mmu_release_ptpage(pfn, level, pinned);

	if (pinned) {
		if (!PageHighMem(page)) {
			xen_mc_batch();

			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

			__set_pfn_prot(pfn, PAGE_KERNEL);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_PFN_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

/* Set the page permissions on an identity-mapped page */
static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
				      PAGE_SIZE);

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == LEVEL1_IDENT_ENTRIES)
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

#ifdef CONFIG_X86_32
			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;
#endif

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}
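
/*
 * Illustrative note (not in the original source): the loop above
 * builds a 1:1 pfn->va map for the first max_pfn pages, one pte page
 * (PTRS_PER_PTE entries, i.e. 2MB on 64-bit) per pmd slot.  With
 * LEVEL1_IDENT_ENTRIES = 4 * PTRS_PER_PTE, at most four fresh pte
 * pages can come from the brk pool; anything beyond that must reuse
 * pte pages already present in the Xen-provided pagetable.  The pte
 * pages are then made RO so the containing pagetable stays pinnable.
 */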

void __init xen_setup_machphys_mapping(void)
{
	struct xen_machphys_mapping mapping;

	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
		machine_to_phys_nr = mapping.max_mfn + 1;
	} else {
		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
	}
#ifdef CONFIG_X86_32
	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
		< machine_to_phys_mapping);
#endif
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
					  unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* max_pfn_mapped is the last pfn mapped in the initial memory
	 * mappings. Considering that on Xen after the kernel mappings we
	 * have the mappings of some pages that don't exist in pfn space, we
	 * set max_pfn_mapped to the last real pfn mapped. */
	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;

	/*
	 * At this stage there can be no user pgd, and no page
	 * structure to attach it to, so make sure we just set kernel
	 * pgd.
	 */
	xen_mc_batch();
	__xen_write_cr3(true, __pa(pgd));
	xen_mc_issue(PARAVIRT_LAZY_CPU);

	memblock_reserve(__pa(xen_start_info->pt_base),
			 xen_start_info->nr_pt_frames * PAGE_SIZE);

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);

static void __init xen_write_cr3_init(unsigned long cr3)
{
	unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));

	BUG_ON(read_cr3() != __pa(initial_page_table));
	BUG_ON(cr3 != __pa(swapper_pg_dir));

	/*
	 * We are switching to swapper_pg_dir for the first time (from
	 * initial_page_table) and therefore need to mark that page
	 * read-only and then pin it.
	 *
	 * Xen disallows sharing of kernel PMDs for PAE
	 * guests. Therefore we must copy the kernel PMD from
	 * initial_page_table into a new kernel PMD to be used in
	 * swapper_pg_dir.
	 */
	swapper_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
	memcpy(swapper_kernel_pmd, initial_kernel_pmd,
	       sizeof(pmd_t) * PTRS_PER_PMD);
	swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);

	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	xen_write_cr3(cr3);
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);

	pv_mmu_ops.write_cr3 = &xen_write_cr3;
}

pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
					  unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	initial_kernel_pmd =
		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);

	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
				  xen_start_info->nr_pt_frames * PAGE_SIZE +
				  512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(initial_kernel_pmd, max_pfn);

	memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	initial_page_table[KERNEL_PGD_BOUNDARY] =
		__pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);

	set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(initial_page_table, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
			  PFN_DOWN(__pa(initial_page_table)));
	xen_write_cr3(__pa(initial_page_table));

	memblock_reserve(__pa(xen_start_info->pt_base),
			 xen_start_info->nr_pt_frames * PAGE_SIZE);

	return initial_page_table;
}
#endif	/* CONFIG_X86_64 */

static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;

static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
	case VVAR_PAGE:
#endif
	case FIX_TEXT_POKE0:
	case FIX_TEXT_POKE1:
		/* All local page mappings */
		pte = pfn_pte(phys, prot);
		break;

#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

#ifdef CONFIG_X86_IO_APIC
	case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
		/*
		 * We just don't map the IO APIC - all access is via
		 * hypercalls.  Keep the address in the pte for reference.
		 */
		pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
		break;
#endif

	case FIX_PARAVIRT_BOOTMAP:
		/* This is an MFN, but it isn't an IO mapping from the
		   IO domain */
		pte = mfn_pte(phys, prot);
		break;

	default:
		/* By default, set_fixmap is used for hardware mappings */
		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
		break;
	}

	__native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
	/* Replicate changes to map the vsyscall page into the user
	   pagetable vsyscall mapping. */
	if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
	    idx == VVAR_PAGE) {
		unsigned long vaddr = __fix_to_virt(idx);
		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
	}
#endif
}

static void __init xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
	SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
	xen_mark_init_mm_pinned();
}

static void xen_leave_lazy_mmu(void)
{
	preempt_disable();
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initconst = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
	.write_cr3 = xen_write_cr3_init,
#else
	.write_cr3 = xen_write_cr3,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

void __init xen_init_mmu_ops(void)
{
	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
				mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents   = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid        = DOMID_SELF
		},
		.out = {
			.nr_extents   = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid        = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB. Therefore any calls to
	 * this function are redundant and can be ignored.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
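
/*
 * Illustrative sketch (not in the original source): a driver that
 * needs a machine-contiguous DMA buffer below 4GB could do, for a
 * hypothetical buffer "buf":
 *
 *	buf = (void *)__get_free_pages(GFP_KERNEL, 2);	// 4 pfn-contig pages
 *	rc = xen_create_contiguous_region((unsigned long)buf, 2, 32);
 *	// on success the 4 pages now sit in consecutive mfns below 2^32
 *	...
 *	xen_destroy_contiguous_region((unsigned long)buf, 2);
 *	free_pages((unsigned long)buf, 2);
 *
 * In-tree, swiotlb-xen is the main user of this pair.
 */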

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
	unsigned long mfn;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long mfn, int nr,
			       pgprot_t prot, unsigned domid)
{
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;

	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
				(VM_PFNMAP | VM_RESERVED | VM_IO)));

	rmd.mfn = mfn;
	rmd.prot = prot;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		err = -EFAULT;
		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
			goto out;

		nr -= batch;
		addr += range;
	}

	err = 0;
out:

	flush_tlb_all();

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
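
/*
 * Illustrative note (not in the original source): this is the
 * mechanism behind dom0's /dev/xen/privcmd mmap ioctls, which let
 * toolstack processes map another guest's frames.  A caller holding a
 * suitable vma maps nr frames starting at mfn into it:
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start,
 *					 mfn, nr, vma->vm_page_prot,
 *					 domid);
 *
 * Updates go straight to HYPERVISOR_mmu_update() with the foreign
 * domid, in batches of REMAP_BATCH_SIZE, rather than through the
 * current-domain multicall machinery used elsewhere in this file.
 */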