/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still RO, and the pinning constraints are maintained
 * when it is loaded.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

41#include <linux/sched.h>
42#include <linux/highmem.h>
43#include <linux/debugfs.h>
44#include <linux/bug.h>
45#include <linux/vmalloc.h>
46#include <linux/module.h>
47#include <linux/gfp.h>
48#include <linux/memblock.h>
49#include <linux/seq_file.h>
50#include <linux/crash_dump.h>
51
52#include <trace/events/xen.h>
53
54#include <asm/pgtable.h>
55#include <asm/tlbflush.h>
56#include <asm/fixmap.h>
57#include <asm/mmu_context.h>
58#include <asm/setup.h>
59#include <asm/paravirt.h>
60#include <asm/e820.h>
61#include <asm/linkage.h>
62#include <asm/page.h>
63#include <asm/init.h>
64#include <asm/pat.h>
65#include <asm/smp.h>
66
67#include <asm/xen/hypercall.h>
68#include <asm/xen/hypervisor.h>
69
70#include <xen/xen.h>
71#include <xen/page.h>
72#include <xen/interface/xen.h>
73#include <xen/interface/hvm/hvm_op.h>
74#include <xen/interface/version.h>
75#include <xen/interface/memory.h>
76#include <xen/hvc-console.h>
77
78#include "multicalls.h"
79#include "mmu.h"
80#include "debugfs.h"
81
/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_reservation_pages and balloon
 * lists.
 */
86DEFINE_SPINLOCK(xen_reservation_lock);
87
88#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
94#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
95static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
96#endif
97#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
99static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
100#endif

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last cr3 value the kernel asked for.  This may not yet be the
 * effective cr3, because the actual baseptr switch may still be
 * sitting in a lazily deferred multicall batch.
 *
 * xen_current_cr3 contains the cr3 the vcpu is really running on; it
 * is only updated (via a multicall callback) once the NEW_BASEPTR
 * hypercall has actually been submitted, so it is what remote cpus
 * must look at when deciding whether this pagetable is still in use.
 */
116DEFINE_PER_CPU(unsigned long, xen_cr3);
117DEFINE_PER_CPU(unsigned long, xen_current_cr3);
118
/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
124#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
125
126unsigned long arbitrary_virt_to_mfn(void *vaddr)
127{
128 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
129
130 return PFN_DOWN(maddr.maddr);
131}
132
133xmaddr_t arbitrary_virt_to_machine(void *vaddr)
134{
135 unsigned long address = (unsigned long)vaddr;
136 unsigned int level;
137 pte_t *pte;
138 unsigned offset;
139
 /*
  * if the PFN is in the linear mapped vaddr range, we can just use
  * the (quick) virt_to_machine() p2m lookup
  */
144 if (virt_addr_valid(vaddr))
145 return virt_to_machine(vaddr);
146
 /* otherwise we have to do a (slower) full page-table walk */

149 pte = lookup_address(address, &level);
150 BUG_ON(pte == NULL);
151 offset = address & ~PAGE_MASK;
152 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
153}
154EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
155
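/*
 * Xen requires that every page which is part of a pagetable be mapped
 * read-only in the guest.  These two helpers flip the kernel's lowmem
 * mapping of a page between RO and RW with a single
 * update_va_mapping hypercall.
 */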
156void make_lowmem_page_readonly(void *vaddr)
157{
158 pte_t *pte, ptev;
159 unsigned long address = (unsigned long)vaddr;
160 unsigned int level;
161
162 pte = lookup_address(address, &level);
163 if (pte == NULL)
164 return;
165
166 ptev = pte_wrprotect(*pte);
167
168 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
169 BUG();
170}
171
172void make_lowmem_page_readwrite(void *vaddr)
173{
174 pte_t *pte, ptev;
175 unsigned long address = (unsigned long)vaddr;
176 unsigned int level;
177
178 pte = lookup_address(address, &level);
179 if (pte == NULL)
180 return;
181
182 ptev = pte_mkwrite(*pte);
183
184 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
185 BUG();
186}
187
188
189static bool xen_page_pinned(void *ptr)
190{
191 struct page *page = virt_to_page(ptr);
192
193 return PagePinned(page);
194}
195
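/*
 * Install a pte whose frame may belong to another domain (@domid),
 * e.g. when dom0 maps foreign pages.  The update is queued as a
 * single mmu_update multicall with @domid as the foreign domain.
 */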
196void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
197{
198 struct multicall_space mcs;
199 struct mmu_update *u;
200
201 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
202
203 mcs = xen_mc_entry(sizeof(*u));
204 u = mcs.args;
205
206
207 u->ptr = virt_to_machine(ptep).maddr;
208 u->val = pte_val_ma(pteval);
209
210 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
211
212 xen_mc_issue(PARAVIRT_LAZY_MMU);
213}
214EXPORT_SYMBOL_GPL(xen_set_domain_pte);
215
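/*
 * Append an mmu_update to the multicall batch currently being built.
 * If the previous multicall entry is already an mmu_update hypercall
 * it is extended in place (args[1] counts the updates); otherwise a
 * new MULTI_mmu_update entry is started.
 */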
216static void xen_extend_mmu_update(const struct mmu_update *update)
217{
218 struct multicall_space mcs;
219 struct mmu_update *u;
220
221 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
222
223 if (mcs.mc != NULL) {
224 mcs.mc->args[1]++;
225 } else {
226 mcs = __xen_mc_entry(sizeof(*u));
227 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
228 }
229
230 u = mcs.args;
231 *u = *update;
232}
233
234static void xen_extend_mmuext_op(const struct mmuext_op *op)
235{
236 struct multicall_space mcs;
237 struct mmuext_op *u;
238
239 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
240
241 if (mcs.mc != NULL) {
242 mcs.mc->args[1]++;
243 } else {
244 mcs = __xen_mc_entry(sizeof(*u));
245 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
246 }
247
248 u = mcs.args;
249 *u = *op;
250}
251
252static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
253{
254 struct mmu_update u;
255
256 preempt_disable();
257
258 xen_mc_batch();
259
 /* ptr may be ioremapped for 64-bit pagetable setup */
261 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
262 u.val = pmd_val_ma(val);
263 xen_extend_mmu_update(&u);
264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 preempt_enable();
268}
269
270static void xen_set_pmd(pmd_t *ptr, pmd_t val)
271{
272 trace_xen_mmu_set_pmd(ptr, val);

 /* If page is not pinned, we can just update the entry
    directly */
276 if (!xen_page_pinned(ptr)) {
277 *ptr = val;
278 return;
279 }
280
281 xen_set_pmd_hyper(ptr, val);
282}
283
/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
288void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
289{
290 set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
291}
292
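/*
 * If we are inside a lazy-MMU region, queue the pte write as an
 * mmu_update in the current multicall batch instead of writing it
 * directly.  Returns false when not in lazy MMU mode, in which case
 * the caller must fall back to another method.
 */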
293static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
294{
295 struct mmu_update u;
296
297 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
298 return false;
299
300 xen_mc_batch();
301
302 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
303 u.val = pte_val_ma(pteval);
304 xen_extend_mmu_update(&u);
305
306 xen_mc_issue(PARAVIRT_LAZY_MMU);
307
308 return true;
309}
310
311static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
312{
313 if (!xen_batched_set_pte(ptep, pteval)) {
 /*
  * Could call native_set_pte() here and trap and
  * emulate the PTE write, but with 32-bit guests this
  * needs two traps (one for each of the two 32-bit
  * words in the PTE) so do one hypercall directly
  * instead.
  */
321 struct mmu_update u;
322
323 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
324 u.val = pte_val_ma(pteval);
325 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
326 }
327}
328
329static void xen_set_pte(pte_t *ptep, pte_t pteval)
330{
331 trace_xen_mmu_set_pte(ptep, pteval);
332 __xen_set_pte(ptep, pteval);
333}
334
335static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
336 pte_t *ptep, pte_t pteval)
337{
338 trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
339 __xen_set_pte(ptep, pteval);
340}
341
342pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
343 unsigned long addr, pte_t *ptep)
344{
 /* Just return the pte as-is.  We preserve the bits on commit */
346 trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
347 return *ptep;
348}
349
350void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
351 pte_t *ptep, pte_t pte)
352{
353 struct mmu_update u;
354
355 trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
356 xen_mc_batch();
357
358 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
359 u.val = pte_val_ma(pte);
360 xen_extend_mmu_update(&u);
361
362 xen_mc_issue(PARAVIRT_LAZY_MMU);
363}
364
/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{
368 if (val & _PAGE_PRESENT) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn);
371
372 pteval_t flags = val & PTE_FLAGS_MASK;
373 if (unlikely(pfn == ~0))
374 val = flags & ~_PAGE_PRESENT;
375 else
376 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
377 }
378
379 return val;
380}
381
382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{
384 if (val & _PAGE_PRESENT) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn;
388
389 if (!xen_feature(XENFEAT_auto_translated_physmap))
390 mfn = get_phys_to_machine(pfn);
391 else
392 mfn = pfn;
 /*
  * If there's no mfn for the pfn, then just create an
  * empty non-present pte.  Unfortunately this loses
  * information about the original pfn, so
  * pte_mfn_to_pfn is asymmetric.
  */
399 if (unlikely(mfn == INVALID_P2M_ENTRY)) {
400 mfn = 0;
401 flags = 0;
402 } else {
 /*
  * Paramount to do this test _after_ the
  * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY &
  * IDENTITY_FRAME_BIT resolves to true.
  */
408 mfn &= ~FOREIGN_FRAME_BIT;
409 if (mfn & IDENTITY_FRAME_BIT) {
410 mfn &= ~IDENTITY_FRAME_BIT;
411 flags |= _PAGE_IOMAP;
412 }
413 }
414 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
415 }
416
417 return val;
418}
419
420static pteval_t iomap_pte(pteval_t val)
421{
422 if (val & _PAGE_PRESENT) {
423 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
424 pteval_t flags = val & PTE_FLAGS_MASK;
425
 /* We assume the pte frame number is a MFN, so
    just use it as-is. */
428 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
429 }
430
431 return val;
432}
433
434static pteval_t xen_pte_val(pte_t pte)
435{
436 pteval_t pteval = pte.pte;
437#if 0
 /* If this is a WC pte, convert back from Xen WC to Linux WC */
439 if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
440 WARN_ON(!pat_enabled);
441 pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
442 }
443#endif
444 if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
445 return pteval;
446
447 return pte_mfn_to_pfn(pteval);
448}
449PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
450
451static pgdval_t xen_pgd_val(pgd_t pgd)
452{
453 return pte_mfn_to_pfn(pgd.pgd);
454}
455PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
456
/*
 * The PAT layout a PV guest sees is controlled by Xen and is part of
 * its ABI, so the kernel cannot freely reprogram the PAT MSR.  The
 * WC <-> _PAGE_PAT translation in xen_pte_val()/xen_make_pte() is
 * therefore currently disabled (the #if 0 blocks), and xen_set_pat()
 * only warns if Linux ends up with anything other than its usual
 * WB/WC/UC-/UC layout (0x0007010600070106).
 */
475void xen_set_pat(u64 pat)
476{
477
478
479 WARN_ON(pat != 0x0007010600070106ull);
480}
481
482static pte_t xen_make_pte(pteval_t pte)
483{
484 phys_addr_t addr = (pte & PTE_PFN_MASK);
485#if 0
 /* If Linux is trying to set a WC pte, then map to the Xen WC.
  * If _PAGE_PAT is set, then it probably means it is really
  * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
  * things work out OK...
  *
  * (We should never see kernel mappings with _PAGE_PSE set,
  * but we could see hugetlbfs mappings, I think.)
  */
494 if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
495 if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
496 pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
497 }
498#endif
 /*
  * Unprivileged domains are allowed to do IOMAPpings for
  * PCI passthrough, but not map ISA space.  The ISA
  * mappings are just dummy local mappings to keep other
  * parts of the kernel happy.
  */
505 if (unlikely(pte & _PAGE_IOMAP) &&
506 (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
507 pte = iomap_pte(pte);
508 } else {
509 pte &= ~_PAGE_IOMAP;
510 pte = pte_pfn_to_mfn(pte);
511 }
512
513 return native_make_pte(pte);
514}
515PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
516
517static pgd_t xen_make_pgd(pgdval_t pgd)
518{
519 pgd = pte_pfn_to_mfn(pgd);
520 return native_make_pgd(pgd);
521}
522PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
523
524static pmdval_t xen_pmd_val(pmd_t pmd)
525{
526 return pte_mfn_to_pfn(pmd.pmd);
527}
528PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
529
530static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
531{
532 struct mmu_update u;
533
534 preempt_disable();
535
536 xen_mc_batch();
537
 /* ptr may be ioremapped for 64-bit pagetable setup */
539 u.ptr = arbitrary_virt_to_machine(ptr).maddr;
540 u.val = pud_val_ma(val);
541 xen_extend_mmu_update(&u);
542
543 xen_mc_issue(PARAVIRT_LAZY_MMU);
544
545 preempt_enable();
546}
547
548static void xen_set_pud(pud_t *ptr, pud_t val)
549{
550 trace_xen_mmu_set_pud(ptr, val);

 /* If page is not pinned, we can just update the entry
    directly */
554 if (!xen_page_pinned(ptr)) {
555 *ptr = val;
556 return;
557 }
558
559 xen_set_pud_hyper(ptr, val);
560}
561
562#ifdef CONFIG_X86_PAE
563static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
564{
565 trace_xen_mmu_set_pte_atomic(ptep, pte);
566 set_64bit((u64 *)ptep, native_pte_val(pte));
567}
568
569static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
570{
571 trace_xen_mmu_pte_clear(mm, addr, ptep);
572 if (!xen_batched_set_pte(ptep, native_make_pte(0)))
573 native_pte_clear(mm, addr, ptep);
574}
575
576static void xen_pmd_clear(pmd_t *pmdp)
577{
578 trace_xen_mmu_pmd_clear(pmdp);
579 set_pmd(pmdp, __pmd(0));
580}
581#endif
582
583static pmd_t xen_make_pmd(pmdval_t pmd)
584{
585 pmd = pte_pfn_to_mfn(pmd);
586 return native_make_pmd(pmd);
587}
588PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
589
590#if PAGETABLE_LEVELS == 4
591static pudval_t xen_pud_val(pud_t pud)
592{
593 return pte_mfn_to_pfn(pud.pud);
594}
595PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
596
597static pud_t xen_make_pud(pudval_t pud)
598{
599 pud = pte_pfn_to_mfn(pud);
600
601 return native_make_pud(pud);
602}
603PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
604
605static pgd_t *xen_get_user_pgd(pgd_t *pgd)
606{
607 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
608 unsigned offset = pgd - pgd_page;
609 pgd_t *user_ptr = NULL;
610
611 if (offset < pgd_index(USER_LIMIT)) {
612 struct page *page = virt_to_page(pgd_page);
613 user_ptr = (pgd_t *)page->private;
614 if (user_ptr)
615 user_ptr += offset;
616 }
617
618 return user_ptr;
619}
620
621static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
622{
623 struct mmu_update u;
624
625 u.ptr = virt_to_machine(ptr).maddr;
626 u.val = pgd_val_ma(val);
627 xen_extend_mmu_update(&u);
628}
629
/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
637static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
638{
639 preempt_disable();
640
641 xen_mc_batch();
642
643 __xen_set_pgd_hyper(ptr, val);
644
645 xen_mc_issue(PARAVIRT_LAZY_MMU);
646
647 preempt_enable();
648}
649
650static void xen_set_pgd(pgd_t *ptr, pgd_t val)
651{
652 pgd_t *user_ptr = xen_get_user_pgd(ptr);
653
654 trace_xen_mmu_set_pgd(ptr, user_ptr, val);
655
 /* If page is not pinned, we can just update the entry
    directly */
658 if (!xen_page_pinned(ptr)) {
659 *ptr = val;
660 if (user_ptr) {
661 WARN_ON(xen_page_pinned(user_ptr));
662 *user_ptr = val;
663 }
664 return;
665 }
666
 /* If it's pinned, then we can at least batch the kernel and
    user updates together. */
669 xen_mc_batch();
670
671 __xen_set_pgd_hyper(ptr, val);
672 if (user_ptr)
673 __xen_set_pgd_hyper(user_ptr, val);
674
675 xen_mc_issue(PARAVIRT_LAZY_MMU);
676}
677#endif
678
/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
694static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
695 int (*func)(struct mm_struct *mm, struct page *,
696 enum pt_level),
697 unsigned long limit)
698{
699 int flush = 0;
700 unsigned hole_low, hole_high;
701 unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
702 unsigned pgdidx, pudidx, pmdidx;

 /* The limit is the last byte to be touched */
705 limit--;
706 BUG_ON(limit >= FIXADDR_TOP);
707
708 if (xen_feature(XENFEAT_auto_translated_physmap))
709 return 0;
710
 /*
  * 64-bit has a great big hole in the middle of the address
  * space, which contains the Xen mappings.  On 32-bit these
  * will end up making a zero-sized hole and so is a no-op.
  */
716 hole_low = pgd_index(USER_LIMIT);
717 hole_high = pgd_index(PAGE_OFFSET);
718
719 pgdidx_limit = pgd_index(limit);
720#if PTRS_PER_PUD > 1
721 pudidx_limit = pud_index(limit);
722#else
723 pudidx_limit = 0;
724#endif
725#if PTRS_PER_PMD > 1
726 pmdidx_limit = pmd_index(limit);
727#else
728 pmdidx_limit = 0;
729#endif
730
731 for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
732 pud_t *pud;
733
734 if (pgdidx >= hole_low && pgdidx < hole_high)
735 continue;
736
737 if (!pgd_val(pgd[pgdidx]))
738 continue;
739
740 pud = pud_offset(&pgd[pgdidx], 0);
741
742 if (PTRS_PER_PUD > 1)
743 flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
744
745 for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
746 pmd_t *pmd;
747
748 if (pgdidx == pgdidx_limit &&
749 pudidx > pudidx_limit)
750 goto out;
751
752 if (pud_none(pud[pudidx]))
753 continue;
754
755 pmd = pmd_offset(&pud[pudidx], 0);
756
757 if (PTRS_PER_PMD > 1)
758 flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
759
760 for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
761 struct page *pte;
762
763 if (pgdidx == pgdidx_limit &&
764 pudidx == pudidx_limit &&
765 pmdidx > pmdidx_limit)
766 goto out;
767
768 if (pmd_none(pmd[pmdidx]))
769 continue;
770
771 pte = pmd_page(pmd[pmdidx]);
772 flush |= (*func)(mm, pte, PT_PTE);
773 }
774 }
775 }
776
777out:
 /* Do the top level last, so that the callbacks can use it as
    a cue to do final things like tlb flushes. */
780 flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
781
782 return flush;
783}
784
785static int xen_pgd_walk(struct mm_struct *mm,
786 int (*func)(struct mm_struct *mm, struct page *,
787 enum pt_level),
788 unsigned long limit)
789{
790 return __xen_pgd_walk(mm, mm->pgd, func, limit);
791}
792
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
795static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
796{
797 spinlock_t *ptl = NULL;
798
799#if USE_SPLIT_PTLOCKS
800 ptl = __pte_lockptr(page);
801 spin_lock_nest_lock(ptl, &mm->page_table_lock);
802#endif
803
804 return ptl;
805}
806
807static void xen_pte_unlock(void *v)
808{
809 spinlock_t *ptl = v;
810 spin_unlock(ptl);
811}
812
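/* Queue an MMUEXT (un)pin operation for the pagetable page at @pfn. */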
813static void xen_do_pin(unsigned level, unsigned long pfn)
814{
815 struct mmuext_op op;
816
817 op.cmd = level;
818 op.arg1.mfn = pfn_to_mfn(pfn);
819
820 xen_extend_mmuext_op(&op);
821}
822
823static int xen_pin_page(struct mm_struct *mm, struct page *page,
824 enum pt_level level)
825{
826 unsigned pgfl = TestSetPagePinned(page);
827 int flush;
828
829 if (pgfl)
830 flush = 0;
831 else if (PageHighMem(page))
 /* kmaps need flushing if we found an unpinned
    highpage */
834 flush = 1;
835 else {
836 void *pt = lowmem_page_address(page);
837 unsigned long pfn = page_to_pfn(page);
838 struct multicall_space mcs = __xen_mc_entry(0);
839 spinlock_t *ptl;
840
841 flush = 0;
 /*
  * We need to hold the pagetable lock between the time
  * we make the pagetable RO and when we actually pin
  * it.  If we don't, then other users may come in and
  * attempt to update the pagetable by writing it,
  * which will fail because the memory is RO but not
  * pinned, so Xen won't do the trap'n'emulate.
  *
  * If we're using split pte locks, we can't hold the
  * entire pagetable's worth of locks during the
  * traverse, because we may wrap the preempt count (8
  * bits).  The solution is to mark RO and pin each PTE
  * page while holding the lock.  This means the number
  * of locks we end up holding is never more than a
  * batch size (~32 entries, at present).
  *
  * If we're not using split pte locks, we needn't pin
  * the PTE pages independently, because we're
  * protected by the overall pagetable lock.
  */
863 ptl = NULL;
864 if (level == PT_PTE)
865 ptl = xen_pte_lock(page, mm);
866
867 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
868 pfn_pte(pfn, PAGE_KERNEL_RO),
869 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
870
871 if (ptl) {
872 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

 /* Queue a deferred unlock for when this batch
    is completed. */
876 xen_mc_callback(xen_pte_unlock, ptl);
877 }
878 }
879
880 return flush;
881}
882
/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
886static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
887{
888 trace_xen_mmu_pgd_pin(mm, pgd);
889
890 xen_mc_batch();
891
892 if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
893
894 xen_mc_issue(0);
895
896 kmap_flush_unused();
897
898 xen_mc_batch();
899 }
900
901#ifdef CONFIG_X86_64
902 {
903 pgd_t *user_pgd = xen_get_user_pgd(pgd);
904
905 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
906
907 if (user_pgd) {
908 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
909 xen_do_pin(MMUEXT_PIN_L4_TABLE,
910 PFN_DOWN(__pa(user_pgd)));
911 }
912 }
913#else
914#ifdef CONFIG_X86_PAE
 /* Need to make sure unshared kernel PMD is pinnable */
916 xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
917 PT_PMD);
918#endif
919 xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
920#endif
921 xen_mc_issue(0);
922}
923
924static void xen_pgd_pin(struct mm_struct *mm)
925{
926 __xen_pgd_pin(mm, mm->pgd);
927}
928
/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Save/restore only needs to undo this for
 * the pagetables it pinned itself, so remember them with
 * SetPageSavePinned; xen_mm_unpin_all() drops exactly those pins
 * again on resume.
 */
939void xen_mm_pin_all(void)
940{
941 struct page *page;
942
943 spin_lock(&pgd_lock);
944
945 list_for_each_entry(page, &pgd_list, lru) {
946 if (!PagePinned(page)) {
947 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
948 SetPageSavePinned(page);
949 }
950 }
951
952 spin_unlock(&pgd_lock);
953}
954
/*
 * The init_mm pagetable is really pinned as soon as its created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
960static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
961 enum pt_level level)
962{
963 SetPagePinned(page);
964 return 0;
965}
966
967static void __init xen_mark_init_mm_pinned(void)
968{
969 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
970}
971
972static int xen_unpin_page(struct mm_struct *mm, struct page *page,
973 enum pt_level level)
974{
975 unsigned pgfl = TestClearPagePinned(page);
976
977 if (pgfl && !PageHighMem(page)) {
978 void *pt = lowmem_page_address(page);
979 unsigned long pfn = page_to_pfn(page);
980 spinlock_t *ptl = NULL;
981 struct multicall_space mcs;
982
 /*
  * Do the converse to pin_page.  If we're using split
  * pte locks, we must be holding the lock for while
  * the pte page is unpinned but still RO to prevent
  * concurrent updates from seeing it in this
  * partially-pinned state.
  */
990 if (level == PT_PTE) {
991 ptl = xen_pte_lock(page, mm);
992
993 if (ptl)
994 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
995 }
996
997 mcs = __xen_mc_entry(0);
998
999 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
1000 pfn_pte(pfn, PAGE_KERNEL),
1001 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
1002
1003 if (ptl) {
 /* unlock when batch completed */
1005 xen_mc_callback(xen_pte_unlock, ptl);
1006 }
1007 }
1008
1009 return 0;
1010}
1011
/* Release a pagetable's pages back as normal RW memory. */
1013static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
1014{
1015 trace_xen_mmu_pgd_unpin(mm, pgd);
1016
1017 xen_mc_batch();
1018
1019 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1020
1021#ifdef CONFIG_X86_64
1022 {
1023 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1024
1025 if (user_pgd) {
1026 xen_do_pin(MMUEXT_UNPIN_TABLE,
1027 PFN_DOWN(__pa(user_pgd)));
1028 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
1029 }
1030 }
1031#endif
1032
1033#ifdef CONFIG_X86_PAE
 /* Need to make sure unshared kernel PMD is unpinned */
1035 xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
1036 PT_PMD);
1037#endif
1038
1039 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
1040
1041 xen_mc_issue(0);
1042}
1043
1044static void xen_pgd_unpin(struct mm_struct *mm)
1045{
1046 __xen_pgd_unpin(mm, mm->pgd);
1047}
1048
/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
1053void xen_mm_unpin_all(void)
1054{
1055 struct page *page;
1056
1057 spin_lock(&pgd_lock);
1058
1059 list_for_each_entry(page, &pgd_list, lru) {
1060 if (PageSavePinned(page)) {
1061 BUG_ON(!PagePinned(page));
1062 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
1063 ClearPageSavePinned(page);
1064 }
1065 }
1066
1067 spin_unlock(&pgd_lock);
1068}
1069
1070static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
1071{
1072 spin_lock(&next->page_table_lock);
1073 xen_pgd_pin(next);
1074 spin_unlock(&next->page_table_lock);
1075}
1076
1077static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
1078{
1079 spin_lock(&mm->page_table_lock);
1080 xen_pgd_pin(mm);
1081 spin_unlock(&mm->page_table_lock);
1082}
1083
1084
1085#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
1088static void drop_other_mm_ref(void *info)
1089{
1090 struct mm_struct *mm = info;
1091 struct mm_struct *active_mm;
1092
1093 active_mm = this_cpu_read(cpu_tlbstate.active_mm);
1094
1095 if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
1096 leave_mm(smp_processor_id());
1097
 /* If this cpu still has a stale cr3 reference, then make sure
    it has been flushed. */
1100 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
1101 load_cr3(swapper_pg_dir);
1102}
1103
1104static void xen_drop_mm_ref(struct mm_struct *mm)
1105{
1106 cpumask_var_t mask;
1107 unsigned cpu;
1108
1109 if (current->active_mm == mm) {
1110 if (current->mm == mm)
1111 load_cr3(swapper_pg_dir);
1112 else
1113 leave_mm(smp_processor_id());
1114 }
1115
 /* Get the "official" set of cpus referring to our pagetable. */
1117 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
1118 for_each_online_cpu(cpu) {
1119 if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
1120 && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
1121 continue;
1122 smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
1123 }
1124 return;
1125 }
1126 cpumask_copy(mask, mm_cpumask(mm));
1127
 /* It's possible that a vcpu may have a stale reference to our
    cr3, because it's in lazy mode and hasn't flushed its set of
    pending hypercalls yet.  In this case, we can look at its
    actual current cr3 value, and force it to flush if needed. */
1133 for_each_online_cpu(cpu) {
1134 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
1135 cpumask_set_cpu(cpu, mask);
1136 }
1137
1138 if (!cpumask_empty(mask))
1139 smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
1140 free_cpumask_var(mask);
1141}
1142#else
1143static void xen_drop_mm_ref(struct mm_struct *mm)
1144{
1145 if (current->active_mm == mm)
1146 load_cr3(swapper_pg_dir);
1147}
1148#endif
1149
/*
 * While a process runs, Xen pins its pagetable, which prevents the
 * pagetable pages from being used for anything else.  Before the
 * pagetable can be unpinned and freed in exit_mmap, every cpu that
 * might still have it loaded (tracked via xen_current_cr3) has to be
 * made to switch to swapper_pg_dir; xen_drop_mm_ref() takes care of
 * that, and then the pgd is unpinned under the page_table_lock.
 */
1164static void xen_exit_mmap(struct mm_struct *mm)
1165{
1166 get_cpu();
1167 xen_drop_mm_ref(mm);
1168 put_cpu();
1169
1170 spin_lock(&mm->page_table_lock);
1171
1172
1173 if (xen_page_pinned(mm->pgd))
1174 xen_pgd_unpin(mm);
1175
1176 spin_unlock(&mm->page_table_lock);
1177}
1178
1179static void xen_post_allocator_init(void);
1180
1181#ifdef CONFIG_X86_64
1182static void __init xen_cleanhighmap(unsigned long vaddr,
1183 unsigned long vaddr_end)
1184{
1185 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1186 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

 /* NOTE: The loop is more greedy than the cleanup_highmap variant.
  * We include the PMD passed in on _both_ boundaries. */
1190 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
1191 pmd++, vaddr += PMD_SIZE) {
1192 if (pmd_none(*pmd))
1193 continue;
1194 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1195 set_pmd(pmd, __pmd(0));
1196 }
1197
1198
1199 xen_mc_flush();
1200}
1201#endif
1202static void __init xen_pagetable_init(void)
1203{
1204#ifdef CONFIG_X86_64
1205 unsigned long size;
1206 unsigned long addr;
1207#endif
1208 paging_init();
1209 xen_setup_shared_info();
1210#ifdef CONFIG_X86_64
1211 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1212 unsigned long new_mfn_list;
1213
1214 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1215
1216
1217 new_mfn_list = xen_revector_p2m_tree();
1218 if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
1219
1220 memset((void *)xen_start_info->mfn_list, 0xff, size);
1221
1222
1223 BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
1224 addr = xen_start_info->mfn_list;
1225
1226
1227
1228
1229 size = roundup(size, PMD_SIZE);
1230 xen_cleanhighmap(addr, addr + size);
1231
1232 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1233 memblock_free(__pa(xen_start_info->mfn_list), size);
1234
1235 xen_start_info->mfn_list = new_mfn_list;
1236 } else
1237 goto skip;
1238 }
1239
1240
1241
1242
1243
1244
1245
1246
1247 addr = xen_start_info->pt_base;
1248 size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);
1249
1250 xen_cleanhighmap(addr, addr + size);
1251 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1252#ifdef DEBUG
1253
1254
1255
1256 xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
1257#endif
1258skip:
1259#endif
1260 xen_post_allocator_init();
1261}
1262static void xen_write_cr2(unsigned long cr2)
1263{
1264 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1265}
1266
1267static unsigned long xen_read_cr2(void)
1268{
1269 return this_cpu_read(xen_vcpu)->arch.cr2;
1270}
1271
1272unsigned long xen_read_cr2_direct(void)
1273{
1274 return this_cpu_read(xen_vcpu_info.arch.cr2);
1275}
1276
1277void xen_flush_tlb_all(void)
1278{
1279 struct mmuext_op *op;
1280 struct multicall_space mcs;
1281
1282 trace_xen_mmu_flush_tlb_all(0);
1283
1284 preempt_disable();
1285
1286 mcs = xen_mc_entry(sizeof(*op));
1287
1288 op = mcs.args;
1289 op->cmd = MMUEXT_TLB_FLUSH_ALL;
1290 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1291
1292 xen_mc_issue(PARAVIRT_LAZY_MMU);
1293
1294 preempt_enable();
1295}
1296static void xen_flush_tlb(void)
1297{
1298 struct mmuext_op *op;
1299 struct multicall_space mcs;
1300
1301 trace_xen_mmu_flush_tlb(0);
1302
1303 preempt_disable();
1304
1305 mcs = xen_mc_entry(sizeof(*op));
1306
1307 op = mcs.args;
1308 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1309 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1310
1311 xen_mc_issue(PARAVIRT_LAZY_MMU);
1312
1313 preempt_enable();
1314}
1315
1316static void xen_flush_tlb_single(unsigned long addr)
1317{
1318 struct mmuext_op *op;
1319 struct multicall_space mcs;
1320
1321 trace_xen_mmu_flush_tlb_single(addr);
1322
1323 preempt_disable();
1324
1325 mcs = xen_mc_entry(sizeof(*op));
1326 op = mcs.args;
1327 op->cmd = MMUEXT_INVLPG_LOCAL;
1328 op->arg1.linear_addr = addr & PAGE_MASK;
1329 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1330
1331 xen_mc_issue(PARAVIRT_LAZY_MMU);
1332
1333 preempt_enable();
1334}
1335
1336static void xen_flush_tlb_others(const struct cpumask *cpus,
1337 struct mm_struct *mm, unsigned long start,
1338 unsigned long end)
1339{
1340 struct {
1341 struct mmuext_op op;
1342#ifdef CONFIG_SMP
1343 DECLARE_BITMAP(mask, num_processors);
1344#else
1345 DECLARE_BITMAP(mask, NR_CPUS);
1346#endif
1347 } *args;
1348 struct multicall_space mcs;
1349
1350 trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);
1351
1352 if (cpumask_empty(cpus))
1353 return;
1354
1355 mcs = xen_mc_entry(sizeof(*args));
1356 args = mcs.args;
1357 args->op.arg2.vcpumask = to_cpumask(args->mask);
1358
 /* Remove us, and any offline CPUs. */
1360 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1361 cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
1362
1363 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1364 if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
1365 args->op.cmd = MMUEXT_INVLPG_MULTI;
1366 args->op.arg1.linear_addr = start;
1367 }
1368
1369 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1370
1371 xen_mc_issue(PARAVIRT_LAZY_MMU);
1372}
1373
1374static unsigned long xen_read_cr3(void)
1375{
1376 return this_cpu_read(xen_cr3);
1377}
1378
1379static void set_current_cr3(void *v)
1380{
1381 this_cpu_write(xen_current_cr3, (unsigned long)v);
1382}
1383
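/*
 * Queue a NEW_BASEPTR (or NEW_USER_BASEPTR) mmuext op for @cr3.
 * xen_cr3 is updated immediately; xen_current_cr3 is only updated via
 * a multicall callback once the batch has really been submitted.
 */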
1384static void __xen_write_cr3(bool kernel, unsigned long cr3)
1385{
1386 struct mmuext_op op;
1387 unsigned long mfn;
1388
1389 trace_xen_mmu_write_cr3(kernel, cr3);
1390
1391 if (cr3)
1392 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1393 else
1394 mfn = 0;
1395
1396 WARN_ON(mfn == 0 && kernel);
1397
1398 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1399 op.arg1.mfn = mfn;
1400
1401 xen_extend_mmuext_op(&op);
1402
1403 if (kernel) {
1404 this_cpu_write(xen_cr3, cr3);

 /* Update xen_current_cr3 once the batch has actually
    been submitted. */
1408 xen_mc_callback(set_current_cr3, (void *)cr3);
1409 }
1410}
1411static void xen_write_cr3(unsigned long cr3)
1412{
1413 BUG_ON(preemptible());
1414
1415 xen_mc_batch();

 /* Update while interrupts are disabled, so it's atomic with
    respect to IPIs */
1419 this_cpu_write(xen_cr3, cr3);
1420
1421 __xen_write_cr3(true, cr3);
1422
1423#ifdef CONFIG_X86_64
1424 {
1425 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1426 if (user_pgd)
1427 __xen_write_cr3(false, __pa(user_pgd));
1428 else
1429 __xen_write_cr3(false, 0);
1430 }
1431#endif
1432
1433 xen_mc_issue(PARAVIRT_LAZY_CPU);
1434}
1435
1436#ifdef CONFIG_X86_64
/*
 * At boot, Xen has already built and pinned the pagetables the guest
 * is running on, and we are still using them the first time the
 * kernel writes %cr3.  This early variant of xen_write_cr3() only
 * switches the kernel baseptr: there is no user pagetable yet, so no
 * MMUEXT_NEW_USER_BASEPTR op is needed.  Once the allocators are up,
 * xen_post_allocator_init() replaces pv_mmu_ops.write_cr3 with the
 * full xen_write_cr3().
 */
1457static void __init xen_write_cr3_init(unsigned long cr3)
1458{
1459 BUG_ON(preemptible());
1460
1461 xen_mc_batch();
1462
 /* Update while interrupts are disabled, so it's atomic with
    respect to IPIs */
1465 this_cpu_write(xen_cr3, cr3);
1466
1467 __xen_write_cr3(true, cr3);
1468
1469 xen_mc_issue(PARAVIRT_LAZY_CPU);
1470}
1471#endif
1472
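/*
 * On 64-bit, each kernel pgd has a shadow user pgd (kept in
 * page->private of the pgd's page) whose vsyscall slot points at
 * level3_user_vsyscall.  Allocate it here; it is freed again in
 * xen_pgd_free().
 */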
1473static int xen_pgd_alloc(struct mm_struct *mm)
1474{
1475 pgd_t *pgd = mm->pgd;
1476 int ret = 0;
1477
1478 BUG_ON(PagePinned(virt_to_page(pgd)));
1479
1480#ifdef CONFIG_X86_64
1481 {
1482 struct page *page = virt_to_page(pgd);
1483 pgd_t *user_pgd;
1484
1485 BUG_ON(page->private != 0);
1486
1487 ret = -ENOMEM;
1488
1489 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1490 page->private = (unsigned long)user_pgd;
1491
1492 if (user_pgd != NULL) {
1493 user_pgd[pgd_index(VSYSCALL_START)] =
1494 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1495 ret = 0;
1496 }
1497
1498 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1499 }
1500#endif
1501
1502 return ret;
1503}
1504
1505static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1506{
1507#ifdef CONFIG_X86_64
1508 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1509
1510 if (user_pgd)
1511 free_page((unsigned long)user_pgd);
1512#endif
1513}
1514
1515#ifdef CONFIG_X86_32
1516static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1517{
 /* If there's an existing pte, then don't allow _PAGE_RW to be set */
1519 if (pte_val_ma(*ptep) & _PAGE_PRESENT)
1520 pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
1521 pte_val_ma(pte));
1522
1523 return pte;
1524}
1525#else
1526static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
1527{
1528 return pte;
1529}
1530#endif
/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive.  At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
1546static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1547{
1548 if (pte_mfn(pte) != INVALID_P2M_ENTRY)
1549 pte = mask_rw_pte(ptep, pte);
1550 else
1551 pte = __pte_ma(0);
1552
1553 native_set_pte(ptep, pte);
1554}
1555
1556static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1557{
1558 struct mmuext_op op;
1559 op.cmd = cmd;
1560 op.arg1.mfn = pfn_to_mfn(pfn);
1561 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
1562 BUG();
1563}
1564
/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
1567static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1568{
1569#ifdef CONFIG_FLATMEM
1570 BUG_ON(mem_map);
1571#endif
1572 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1573 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1574}
1575
/* Used for pmd and pud */
1577static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1578{
1579#ifdef CONFIG_FLATMEM
1580 BUG_ON(mem_map);
1581#endif
1582 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1583}
1584
/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
1587static void __init xen_release_pte_init(unsigned long pfn)
1588{
1589 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1590 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1591}
1592
1593static void __init xen_release_pmd_init(unsigned long pfn)
1594{
1595 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1596}
1597
1598static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1599{
1600 struct multicall_space mcs;
1601 struct mmuext_op *op;
1602
1603 mcs = __xen_mc_entry(sizeof(*op));
1604 op = mcs.args;
1605 op->cmd = cmd;
1606 op->arg1.mfn = pfn_to_mfn(pfn);
1607
1608 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1609}
1610
1611static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1612{
1613 struct multicall_space mcs;
1614 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1615
1616 mcs = __xen_mc_entry(0);
1617 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1618 pfn_pte(pfn, prot), 0);
1619}
1620
/* This needs to make sure the new pte page is pinned iff it is being
   attached to a pinned pagetable. */
1623static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
1624 unsigned level)
1625{
1626 bool pinned = PagePinned(virt_to_page(mm->pgd));
1627
1628 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1629
1630 if (pinned) {
1631 struct page *page = pfn_to_page(pfn);
1632
1633 SetPagePinned(page);
1634
1635 if (!PageHighMem(page)) {
1636 xen_mc_batch();
1637
1638 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1639
1640 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1641 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1642
1643 xen_mc_issue(PARAVIRT_LAZY_MMU);
1644 } else {
 /* make sure there are no stray mappings of
    this page */
1647 kmap_flush_unused();
1648 }
1649 }
1650}
1651
1652static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1653{
1654 xen_alloc_ptpage(mm, pfn, PT_PTE);
1655}
1656
1657static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1658{
1659 xen_alloc_ptpage(mm, pfn, PT_PMD);
1660}
1661
1662
1663static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1664{
1665 struct page *page = pfn_to_page(pfn);
1666 bool pinned = PagePinned(page);
1667
1668 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1669
1670 if (pinned) {
1671 if (!PageHighMem(page)) {
1672 xen_mc_batch();
1673
1674 if (level == PT_PTE && USE_SPLIT_PTLOCKS)
1675 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1676
1677 __set_pfn_prot(pfn, PAGE_KERNEL);
1678
1679 xen_mc_issue(PARAVIRT_LAZY_MMU);
1680 }
1681 ClearPagePinned(page);
1682 }
1683}
1684
1685static void xen_release_pte(unsigned long pfn)
1686{
1687 xen_release_ptpage(pfn, PT_PTE);
1688}
1689
1690static void xen_release_pmd(unsigned long pfn)
1691{
1692 xen_release_ptpage(pfn, PT_PMD);
1693}
1694
1695#if PAGETABLE_LEVELS == 4
1696static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1697{
1698 xen_alloc_ptpage(mm, pfn, PT_PUD);
1699}
1700
1701static void xen_release_pud(unsigned long pfn)
1702{
1703 xen_release_ptpage(pfn, PT_PUD);
1704}
1705#endif
1706
1707void __init xen_reserve_top(void)
1708{
1709#ifdef CONFIG_X86_32
1710 unsigned long top = HYPERVISOR_VIRT_START;
1711 struct xen_platform_parameters pp;
1712
1713 if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
1714 top = pp.virt_start;
1715
1716 reserve_top_address(-top);
1717#endif
1718}
1719
/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
1724static void *__ka(phys_addr_t paddr)
1725{
1726#ifdef CONFIG_X86_64
1727 return (void *)(paddr + __START_KERNEL_map);
1728#else
1729 return __va(paddr);
1730#endif
1731}
1732
/* Convert a machine address to physical address */
1734static unsigned long m2p(phys_addr_t maddr)
1735{
1736 phys_addr_t paddr;
1737
1738 maddr &= PTE_PFN_MASK;
1739 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1740
1741 return paddr;
1742}
1743
/* Convert a machine address to kernel virtual */
1745static void *m2v(phys_addr_t maddr)
1746{
1747 return __ka(m2p(maddr));
1748}
1749
1750
1751static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
1752{
1753 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1754 pte_t pte = pfn_pte(pfn, prot);
1755
1756 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1757 BUG();
1758}
1759static void set_page_prot(void *addr, pgprot_t prot)
1760{
1761 return set_page_prot_flags(addr, prot, UVMF_NONE);
1762}
1763#ifdef CONFIG_X86_32
1764static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1765{
1766 unsigned pmdidx, pteidx;
1767 unsigned ident_pte;
1768 unsigned long pfn;
1769
1770 level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
1771 PAGE_SIZE);
1772
1773 ident_pte = 0;
1774 pfn = 0;
1775 for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
1776 pte_t *pte_page;
1777
1778
1779 if (pmd_present(pmd[pmdidx]))
1780 pte_page = m2v(pmd[pmdidx].pmd);
1781 else {
1782
1783 if (ident_pte == LEVEL1_IDENT_ENTRIES)
1784 break;
1785
1786 pte_page = &level1_ident_pgt[ident_pte];
1787 ident_pte += PTRS_PER_PTE;
1788
1789 pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
1790 }
1791
1792
1793 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1794 pte_t pte;
1795
1796#ifdef CONFIG_X86_32
1797 if (pfn > max_pfn_mapped)
1798 max_pfn_mapped = pfn;
1799#endif
1800
1801 if (!pte_none(pte_page[pteidx]))
1802 continue;
1803
1804 pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
1805 pte_page[pteidx] = pte;
1806 }
1807 }
1808
1809 for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
1810 set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
1811
1812 set_page_prot(pmd, PAGE_KERNEL_RO);
1813}
1814#endif
1815void __init xen_setup_machphys_mapping(void)
1816{
1817 struct xen_machphys_mapping mapping;
1818
1819 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1820 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1821 machine_to_phys_nr = mapping.max_mfn + 1;
1822 } else {
1823 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1824 }
1825#ifdef CONFIG_X86_32
1826 WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
1827 < machine_to_phys_mapping);
1828#endif
1829}
1830
1831#ifdef CONFIG_X86_64
1832static void convert_pfn_mfn(void *v)
1833{
1834 pte_t *pte = v;
1835 int i;
1836
 /* All levels are converted the same way, so just treat them
    as ptes. */
1839 for (i = 0; i < PTRS_PER_PTE; i++)
1840 pte[i] = xen_make_pte(pte[i].pte);
1841}
1842static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1843 unsigned long addr)
1844{
1845 if (*pt_base == PFN_DOWN(__pa(addr))) {
1846 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1847 clear_page((void *)addr);
1848 (*pt_base)++;
1849 }
1850 if (*pt_end == PFN_DOWN(__pa(addr))) {
1851 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1852 clear_page((void *)addr);
1853 (*pt_end)--;
1854 }
1855}
1856
/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, and level2_kernel_pgt.  This means that only the
 * kernel has a physical mapping to start with - but that's enough to
 * get __va working.  We need to fill in the rest of the physical
 * mapping once some sort of allocator has been set up.
 */
1867void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1868{
1869 pud_t *l3;
1870 pmd_t *l2;
1871 unsigned long addr[3];
1872 unsigned long pt_base, pt_end;
1873 unsigned i;
1874
 /* max_pfn_mapped is the last pfn mapped in the initial memory
  * mappings.  Considering that on Xen after the kernel mappings we
  * have the mappings of some pages that don't exist in pfn space, we
  * set max_pfn_mapped to the last real pfn mapped. */
1879 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1880
1881 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1882 pt_end = pt_base + xen_start_info->nr_pt_frames;
1883
1884
1885 init_level4_pgt[0] = __pgd(0);
1886
1887
1888
1889
1890 convert_pfn_mfn(init_level4_pgt);
1891
1892
1893 convert_pfn_mfn(level3_ident_pgt);
1894
1895
1896 convert_pfn_mfn(level3_kernel_pgt);
1897
1898
1899 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1900 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1901
1902 addr[0] = (unsigned long)pgd;
1903 addr[1] = (unsigned long)l3;
1904 addr[2] = (unsigned long)l2;
1905
1906
1907
1908
1909
1910
1911 copy_page(level2_ident_pgt, l2);
1912
1913 copy_page(level2_kernel_pgt, l2);
1914
1915
1916 l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
1917 l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
1918 copy_page(level2_fixmap_pgt, l2);
1919
1920
1921
1922
1923 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
1924 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1925 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1926 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1927 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1928 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1929 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1930
1931
1932 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1933 PFN_DOWN(__pa_symbol(init_level4_pgt)));
1934
1935
1936 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1937
1938
1939
1940
1941
1942
1943 xen_mc_batch();
1944 __xen_write_cr3(true, __pa(init_level4_pgt));
1945 xen_mc_issue(PARAVIRT_LAZY_CPU);
1946
1947
1948
1949
1950
1951
1952
1953 for (i = 0; i < ARRAY_SIZE(addr); i++)
1954 check_pt_base(&pt_base, &pt_end, addr[i]);
1955
1956
1957 memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE);
1958
1959 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1960}
1961#else
1962static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
1963static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
1964
1965static void __init xen_write_cr3_init(unsigned long cr3)
1966{
1967 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
1968
1969 BUG_ON(read_cr3() != __pa(initial_page_table));
1970 BUG_ON(cr3 != __pa(swapper_pg_dir));
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982 swapper_kernel_pmd =
1983 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1984 copy_page(swapper_kernel_pmd, initial_kernel_pmd);
1985 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
1986 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
1987 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
1988
1989 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
1990 xen_write_cr3(cr3);
1991 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
1992
1993 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
1994 PFN_DOWN(__pa(initial_page_table)));
1995 set_page_prot(initial_page_table, PAGE_KERNEL);
1996 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
1997
1998 pv_mmu_ops.write_cr3 = &xen_write_cr3;
1999}
2000
2001void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2002{
2003 pmd_t *kernel_pmd;
2004
2005 initial_kernel_pmd =
2006 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2007
2008 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2009 xen_start_info->nr_pt_frames * PAGE_SIZE +
2010 512*1024);
2011
2012 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2013 copy_page(initial_kernel_pmd, kernel_pmd);
2014
2015 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2016
2017 copy_page(initial_page_table, pgd);
2018 initial_page_table[KERNEL_PGD_BOUNDARY] =
2019 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2020
2021 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2022 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2023 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2024
2025 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2026
2027 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2028 PFN_DOWN(__pa(initial_page_table)));
2029 xen_write_cr3(__pa(initial_page_table));
2030
2031 memblock_reserve(__pa(xen_start_info->pt_base),
2032 xen_start_info->nr_pt_frames * PAGE_SIZE);
2033}
2034#endif
2035
2036static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2037
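/*
 * Fixmap entries need special treatment: ordinary kernel pages get a
 * pfn-based pte, the (unusable) local/IO-APIC slots are pointed at a
 * dummy page, and everything else is assumed to be machine memory and
 * mapped with an mfn-based pte (plus _PAGE_IOMAP).
 */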
2038static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2039{
2040 pte_t pte;
2041
2042 phys >>= PAGE_SHIFT;
2043
2044 switch (idx) {
2045 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2046 case FIX_RO_IDT:
2047#ifdef CONFIG_X86_32
2048 case FIX_WP_TEST:
2049 case FIX_VDSO:
2050# ifdef CONFIG_HIGHMEM
2051 case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2052# endif
2053#else
2054 case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
2055 case VVAR_PAGE:
2056#endif
2057 case FIX_TEXT_POKE0:
2058 case FIX_TEXT_POKE1:
2059
2060 pte = pfn_pte(phys, prot);
2061 break;
2062
2063#ifdef CONFIG_X86_LOCAL_APIC
2064 case FIX_APIC_BASE:
2065 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2066 break;
2067#endif
2068
2069#ifdef CONFIG_X86_IO_APIC
2070 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2071
2072
2073
2074
2075 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2076 break;
2077#endif
2078
2079 case FIX_PARAVIRT_BOOTMAP:
2080
2081
2082 pte = mfn_pte(phys, prot);
2083 break;
2084
2085 default:
2086
2087 pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
2088 break;
2089 }
2090
2091 __native_set_fixmap(idx, pte);
2092
2093#ifdef CONFIG_X86_64
2094
2095
2096 if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
2097 idx == VVAR_PAGE) {
2098 unsigned long vaddr = __fix_to_virt(idx);
2099 set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2100 }
2101#endif
2102}
2103
2104static void __init xen_post_allocator_init(void)
2105{
2106 pv_mmu_ops.set_pte = xen_set_pte;
2107 pv_mmu_ops.set_pmd = xen_set_pmd;
2108 pv_mmu_ops.set_pud = xen_set_pud;
2109#if PAGETABLE_LEVELS == 4
2110 pv_mmu_ops.set_pgd = xen_set_pgd;
2111#endif
2112
2113
2114
2115 pv_mmu_ops.alloc_pte = xen_alloc_pte;
2116 pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2117 pv_mmu_ops.release_pte = xen_release_pte;
2118 pv_mmu_ops.release_pmd = xen_release_pmd;
2119#if PAGETABLE_LEVELS == 4
2120 pv_mmu_ops.alloc_pud = xen_alloc_pud;
2121 pv_mmu_ops.release_pud = xen_release_pud;
2122#endif
2123
2124#ifdef CONFIG_X86_64
2125 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2126 SetPagePinned(virt_to_page(level3_user_vsyscall));
2127#endif
2128 xen_mark_init_mm_pinned();
2129}
2130
2131static void xen_leave_lazy_mmu(void)
2132{
2133 preempt_disable();
2134 xen_mc_flush();
2135 paravirt_leave_lazy_mmu();
2136 preempt_enable();
2137}
2138
2139static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2140 .read_cr2 = xen_read_cr2,
2141 .write_cr2 = xen_write_cr2,
2142
2143 .read_cr3 = xen_read_cr3,
2144 .write_cr3 = xen_write_cr3_init,
2145
2146 .flush_tlb_user = xen_flush_tlb,
2147 .flush_tlb_kernel = xen_flush_tlb,
2148 .flush_tlb_single = xen_flush_tlb_single,
2149 .flush_tlb_others = xen_flush_tlb_others,
2150
2151 .pte_update = paravirt_nop,
2152 .pte_update_defer = paravirt_nop,
2153
2154 .pgd_alloc = xen_pgd_alloc,
2155 .pgd_free = xen_pgd_free,
2156
2157 .alloc_pte = xen_alloc_pte_init,
2158 .release_pte = xen_release_pte_init,
2159 .alloc_pmd = xen_alloc_pmd_init,
2160 .release_pmd = xen_release_pmd_init,
2161
2162 .set_pte = xen_set_pte_init,
2163 .set_pte_at = xen_set_pte_at,
2164 .set_pmd = xen_set_pmd_hyper,
2165
2166 .ptep_modify_prot_start = __ptep_modify_prot_start,
2167 .ptep_modify_prot_commit = __ptep_modify_prot_commit,
2168
2169 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2170 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2171
2172 .make_pte = PV_CALLEE_SAVE(xen_make_pte),
2173 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2174
2175#ifdef CONFIG_X86_PAE
2176 .set_pte_atomic = xen_set_pte_atomic,
2177 .pte_clear = xen_pte_clear,
2178 .pmd_clear = xen_pmd_clear,
2179#endif
2180 .set_pud = xen_set_pud_hyper,
2181
2182 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2183 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2184
2185#if PAGETABLE_LEVELS == 4
2186 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2187 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2188 .set_pgd = xen_set_pgd_hyper,
2189
2190 .alloc_pud = xen_alloc_pmd_init,
2191 .release_pud = xen_release_pmd_init,
2192#endif
2193
2194 .activate_mm = xen_activate_mm,
2195 .dup_mmap = xen_dup_mmap,
2196 .exit_mmap = xen_exit_mmap,
2197
2198 .lazy_mode = {
2199 .enter = paravirt_enter_lazy_mmu,
2200 .leave = xen_leave_lazy_mmu,
2201 .flush = paravirt_flush_lazy_mmu,
2202 },
2203
2204 .set_fixmap = xen_set_fixmap,
2205};
2206
2207void __init xen_init_mmu_ops(void)
2208{
2209 x86_init.paging.pagetable_init = xen_pagetable_init;
2210 pv_mmu_ops = xen_mmu_ops;
2211
2212 memset(dummy_mapping, 0xff, PAGE_SIZE);
2213}
2214
2215
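/*
 * Exchange a range of the kernel's pseudo-physical pages for a
 * machine-contiguous extent (and back again).  This is typically
 * needed for DMA buffers, since pfn-contiguous memory is not normally
 * mfn-contiguous under Xen.
 */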
2216#define MAX_CONTIG_ORDER 9
2217static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2218
2219#define VOID_PTE (mfn_pte(0, __pgprot(0)))
2220static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2221 unsigned long *in_frames,
2222 unsigned long *out_frames)
2223{
2224 int i;
2225 struct multicall_space mcs;
2226
2227 xen_mc_batch();
2228 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2229 mcs = __xen_mc_entry(0);
2230
2231 if (in_frames)
2232 in_frames[i] = virt_to_mfn(vaddr);
2233
2234 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2235 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2236
2237 if (out_frames)
2238 out_frames[i] = virt_to_pfn(vaddr);
2239 }
2240 xen_mc_issue(0);
2241}
2242
/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
2248static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2249 unsigned long *mfns,
2250 unsigned long first_mfn)
2251{
2252 unsigned i, limit;
2253 unsigned long mfn;
2254
2255 xen_mc_batch();
2256
2257 limit = 1u << order;
2258 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2259 struct multicall_space mcs;
2260 unsigned flags;
2261
2262 mcs = __xen_mc_entry(0);
2263 if (mfns)
2264 mfn = mfns[i];
2265 else
2266 mfn = first_mfn + i;
2267
2268 if (i < (limit - 1))
2269 flags = 0;
2270 else {
2271 if (order == 0)
2272 flags = UVMF_INVLPG | UVMF_ALL;
2273 else
2274 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2275 }
2276
2277 MULTI_update_va_mapping(mcs.mc, vaddr,
2278 mfn_pte(mfn, PAGE_KERNEL), flags);
2279
2280 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2281 }
2282
2283 xen_mc_issue(0);
2284}
2285
/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able
 * to satisfy the request or not.
 */
2294static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
2295 unsigned long *pfns_in,
2296 unsigned long extents_out,
2297 unsigned int order_out,
2298 unsigned long *mfns_out,
2299 unsigned int address_bits)
2300{
2301 long rc;
2302 int success;
2303
2304 struct xen_memory_exchange exchange = {
2305 .in = {
2306 .nr_extents = extents_in,
2307 .extent_order = order_in,
2308 .extent_start = pfns_in,
2309 .domid = DOMID_SELF
2310 },
2311 .out = {
2312 .nr_extents = extents_out,
2313 .extent_order = order_out,
2314 .extent_start = mfns_out,
2315 .address_bits = address_bits,
2316 .domid = DOMID_SELF
2317 }
2318 };
2319
2320 BUG_ON(extents_in << order_in != extents_out << order_out);
2321
2322 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2323 success = (exchange.nr_exchanged == extents_in);
2324
2325 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2326 BUG_ON(success && (rc != 0));
2327
2328 return success;
2329}
2330
2331int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
2332 unsigned int address_bits)
2333{
2334 unsigned long *in_frames = discontig_frames, out_frame;
2335 unsigned long flags;
2336 int success;
2337
 /*
  * Currently an auto-translated guest will not perform I/O, nor will
  * it require PAE page directories below 4GB.  Therefore any calls to
  * this function are redundant and can be ignored.
  */
2344 if (xen_feature(XENFEAT_auto_translated_physmap))
2345 return 0;
2346
2347 if (unlikely(order > MAX_CONTIG_ORDER))
2348 return -ENOMEM;
2349
2350 memset((void *) vstart, 0, PAGE_SIZE << order);
2351
2352 spin_lock_irqsave(&xen_reservation_lock, flags);
2353
 /* 1. Zap current PTEs, remembering MFNs. */
2355 xen_zap_pfn_range(vstart, order, in_frames, NULL);
2356
 /* 2. Get a new contiguous memory extent. */
2358 out_frame = virt_to_pfn(vstart);
2359 success = xen_exchange_memory(1UL << order, 0, in_frames,
2360 1, order, &out_frame,
2361 address_bits);
2362
 /* 3. Map the new extent in place of old pages. */
2364 if (success)
2365 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2366 else
2367 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2368
2369 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2370
2371 return success ? 0 : -ENOMEM;
2372}
2373EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
2374
2375void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
2376{
2377 unsigned long *out_frames = discontig_frames, in_frame;
2378 unsigned long flags;
2379 int success;
2380
2381 if (xen_feature(XENFEAT_auto_translated_physmap))
2382 return;
2383
2384 if (unlikely(order > MAX_CONTIG_ORDER))
2385 return;
2386
2387 memset((void *) vstart, 0, PAGE_SIZE << order);
2388
2389 spin_lock_irqsave(&xen_reservation_lock, flags);
2390
 /* 1. Find start MFN of contiguous extent. */
2392 in_frame = virt_to_mfn(vstart);
2393
 /* 2. Zap current PTEs. */
2395 xen_zap_pfn_range(vstart, order, NULL, out_frames);
2396
 /* 3. Do the exchange for non-contiguous MFNs. */
2398 success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2399 0, out_frames, 0);
2400
 /* 4. Map new pages in place of old pages. */
2402 if (success)
2403 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2404 else
2405 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2406
2407 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2408}
2409EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
2410
2411#ifdef CONFIG_XEN_PVHVM
2412#ifdef CONFIG_PROC_VMCORE
/*
 * Used by /proc/vmcore in a kdump kernel: report whether a pfn of the
 * crashed kernel was backed by RAM, or was e.g. an MMIO hole or a
 * page ballooned out by the previous kernel, so that vmcore does not
 * try to read pages with nothing behind them.  Returns 1 for RAM,
 * 0 for non-RAM and a negative error if the query fails.
 */
2423static int xen_oldmem_pfn_is_ram(unsigned long pfn)
2424{
2425 struct xen_hvm_get_mem_type a = {
2426 .domid = DOMID_SELF,
2427 .pfn = pfn,
2428 };
2429 int ram;
2430
2431 if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a))
2432 return -ENXIO;
2433
2434 switch (a.mem_type) {
2435 case HVMMEM_mmio_dm:
2436 ram = 0;
2437 break;
2438 case HVMMEM_ram_rw:
2439 case HVMMEM_ram_ro:
2440 default:
2441 ram = 1;
2442 break;
2443 }
2444
2445 return ram;
2446}
2447#endif
2448
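/*
 * Tell the hypervisor that this pagetable is going away
 * (HVMOP_pagetable_dying), so it can drop any shadow state for it.
 */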
2449static void xen_hvm_exit_mmap(struct mm_struct *mm)
2450{
2451 struct xen_hvm_pagetable_dying a;
2452 int rc;
2453
2454 a.domid = DOMID_SELF;
2455 a.gpa = __pa(mm->pgd);
2456 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2457 WARN_ON_ONCE(rc < 0);
2458}
2459
2460static int is_pagetable_dying_supported(void)
2461{
2462 struct xen_hvm_pagetable_dying a;
2463 int rc = 0;
2464
2465 a.domid = DOMID_SELF;
2466 a.gpa = 0x00;
2467 rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
2468 if (rc < 0) {
2469 printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
2470 return 0;
2471 }
2472 return 1;
2473}
2474
2475void __init xen_hvm_init_mmu_ops(void)
2476{
2477 if (is_pagetable_dying_supported())
2478 pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
2479#ifdef CONFIG_PROC_VMCORE
2480 register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram);
2481#endif
2482}
2483#endif
2484
2485#define REMAP_BATCH_SIZE 16
2486
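/*
 * xen_remap_domain_mfn_range() maps a run of foreign machine frames
 * into a VMA.  The ptes are built by remap_area_mfn_pte_fn() and
 * pushed to Xen in batches of REMAP_BATCH_SIZE mmu_update ops.
 */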
2487struct remap_data {
2488 unsigned long mfn;
2489 pgprot_t prot;
2490 struct mmu_update *mmu_update;
2491};
2492
2493static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2494 unsigned long addr, void *data)
2495{
2496 struct remap_data *rmd = data;
2497 pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
2498
2499 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2500 rmd->mmu_update->val = pte_val_ma(pte);
2501 rmd->mmu_update++;
2502
2503 return 0;
2504}
2505
2506int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2507 unsigned long addr,
2508 xen_pfn_t mfn, int nr,
2509 pgprot_t prot, unsigned domid,
2510 struct page **pages)
2511
2512{
2513 struct remap_data rmd;
2514 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2515 int batch;
2516 unsigned long range;
2517 int err = 0;
2518
2519 if (xen_feature(XENFEAT_auto_translated_physmap))
2520 return -EINVAL;
2521
2522 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2523
2524 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2525
2526 rmd.mfn = mfn;
2527 rmd.prot = prot;
2528
2529 while (nr) {
2530 batch = min(REMAP_BATCH_SIZE, nr);
2531 range = (unsigned long)batch << PAGE_SHIFT;
2532
2533 rmd.mmu_update = mmu_update;
2534 err = apply_to_page_range(vma->vm_mm, addr, range,
2535 remap_area_mfn_pte_fn, &rmd);
2536 if (err)
2537 goto out;
2538
2539 err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
2540 if (err < 0)
2541 goto out;
2542
2543 nr -= batch;
2544 addr += range;
2545 }
2546
2547 err = 0;
2548out:
2549
2550 xen_flush_tlb_all();
2551
2552 return err;
2553}
2554EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
2555
2556
2557int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2558 int numpgs, struct page **pages)
2559{
2560 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
2561 return 0;
2562
2563 return -EINVAL;
2564}
2565EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
2566