// SPDX-License-Identifier: GPL-2.0
/*
 * Xen paravirtualized MMU operations.
 *
 * Implements the pv_ops MMU hooks used when Linux runs as a Xen PV
 * guest: conversion of page table entries between PFNs and MFNs,
 * pinning and unpinning of page tables, and the batched hypercalls
 * that replace direct page table writes.
 */
43#include <linux/sched/mm.h>
44#include <linux/debugfs.h>
45#include <linux/bug.h>
46#include <linux/vmalloc.h>
47#include <linux/export.h>
48#include <linux/init.h>
49#include <linux/gfp.h>
50#include <linux/memblock.h>
51#include <linux/seq_file.h>
52#include <linux/crash_dump.h>
53#include <linux/pgtable.h>
54#ifdef CONFIG_KEXEC_CORE
55#include <linux/kexec.h>
56#endif
57
58#include <trace/events/xen.h>
59
60#include <asm/tlbflush.h>
61#include <asm/fixmap.h>
62#include <asm/mmu_context.h>
63#include <asm/setup.h>
64#include <asm/paravirt.h>
65#include <asm/e820/api.h>
66#include <asm/linkage.h>
67#include <asm/page.h>
68#include <asm/init.h>
69#include <asm/memtype.h>
70#include <asm/smp.h>
71#include <asm/tlb.h>
72
73#include <asm/xen/hypercall.h>
74#include <asm/xen/hypervisor.h>
75
76#include <xen/xen.h>
77#include <xen/page.h>
78#include <xen/interface/xen.h>
79#include <xen/interface/hvm/hvm_op.h>
80#include <xen/interface/version.h>
81#include <xen/interface/memory.h>
82#include <xen/hvc-console.h>
83
84#include "multicalls.h"
85#include "mmu.h"
86#include "debugfs.h"
87
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif

/*
 * Protects the memory-exchange operations below (contiguous region
 * create/destroy) which hand frames back to the hypervisor and
 * repopulate them.
 */
static DEFINE_SPINLOCK(xen_reservation_lock);

/*
 * xen_cr3 is the last cr3 value this CPU asked the hypervisor to load;
 * the request may still be sitting in a multicall batch.  xen_current_cr3
 * is only updated from the multicall completion callback, i.e. once the
 * hypervisor has actually switched, so it tracks the cr3 the vcpu is
 * really running on.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */

/* Physical range of the page tables the hypervisor handed us at boot. */
static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/* Set once struct pages exist; before that every page table is "pinned". */
static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready);

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

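/*
 * Helpers to flip the protection of a directly-mapped ("lowmem") page.
 * Under Xen PV the guest may not freely rewrite entries for pages the
 * hypervisor tracks as page tables, so the change is requested through
 * HYPERVISOR_update_va_mapping and validated by the hypervisor.
 */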
126void make_lowmem_page_readonly(void *vaddr)
127{
128 pte_t *pte, ptev;
129 unsigned long address = (unsigned long)vaddr;
130 unsigned int level;
131
132 pte = lookup_address(address, &level);
133 if (pte == NULL)
134 return;
135
136 ptev = pte_wrprotect(*pte);
137
138 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
139 BUG();
140}
141
142void make_lowmem_page_readwrite(void *vaddr)
143{
144 pte_t *pte, ptev;
145 unsigned long address = (unsigned long)vaddr;
146 unsigned int level;
147
148 pte = lookup_address(address, &level);
149 if (pte == NULL)
150 return;
151
152 ptev = pte_mkwrite(*pte);
153
154 if (HYPERVISOR_update_va_mapping(address, ptev, 0))
155 BUG();
156}

/*
 * During early boot all page table pages are pinned, but struct pages do
 * not exist yet, so report every page as pinned until they are ready.
 */
static bool xen_page_pinned(void *ptr)
164{
165 if (static_branch_likely(&xen_struct_pages_ready)) {
166 struct page *page = virt_to_page(ptr);
167
168 return PagePinned(page);
169 }
170 return true;
171}
172
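/*
 * Append an mmu_update to the multicall batch currently being built,
 * extending the argument list of the previous mmu_update multicall when
 * possible instead of emitting a new hypercall.  Callers bracket these
 * with xen_mc_batch()/xen_mc_issue(), e.g.:
 *
 *	xen_mc_batch();
 *	xen_extend_mmu_update(&u);
 *	xen_mc_issue(PARAVIRT_LAZY_MMU);
 */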
173static void xen_extend_mmu_update(const struct mmu_update *update)
174{
175 struct multicall_space mcs;
176 struct mmu_update *u;
177
178 mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
179
180 if (mcs.mc != NULL) {
181 mcs.mc->args[1]++;
182 } else {
183 mcs = __xen_mc_entry(sizeof(*u));
184 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
185 }
186
187 u = mcs.args;
188 *u = *update;
189}
190
191static void xen_extend_mmuext_op(const struct mmuext_op *op)
192{
193 struct multicall_space mcs;
194 struct mmuext_op *u;
195
196 mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
197
198 if (mcs.mc != NULL) {
199 mcs.mc->args[1]++;
200 } else {
201 mcs = __xen_mc_entry(sizeof(*u));
202 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
203 }
204
205 u = mcs.args;
206 *u = *op;
207}
208
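/*
 * Set a pmd entry via the hypervisor's MMU-update interface.  Installed
 * as the initial .set_pmd op and used later for pinned page tables.
 */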
209static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
210{
211 struct mmu_update u;
212
213 preempt_disable();
214
215 xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
219 u.val = pmd_val_ma(val);
220 xen_extend_mmu_update(&u);
221
222 xen_mc_issue(PARAVIRT_LAZY_MMU);
223
224 preempt_enable();
225}
226
227static void xen_set_pmd(pmd_t *ptr, pmd_t val)
228{
229 trace_xen_mmu_set_pmd(ptr, val);

        /*
         * If the page is not pinned, Xen is not tracking it as a page
         * table, so we can just write the entry directly.
         */
        if (!xen_page_pinned(ptr)) {
234 *ptr = val;
235 return;
236 }
237
238 xen_set_pmd_hyper(ptr, val);
239}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
246{
247 if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags),
248 UVMF_INVLPG))
249 BUG();
250}
251
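/*
 * Queue a pte write in the current multicall batch if we are inside a
 * lazy-MMU region; returns false when the caller must fall back to an
 * immediate update.
 */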
252static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
253{
254 struct mmu_update u;
255
256 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
257 return false;
258
259 xen_mc_batch();
260
261 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
262 u.val = pte_val_ma(pteval);
263 xen_extend_mmu_update(&u);
264
265 xen_mc_issue(PARAVIRT_LAZY_MMU);
266
267 return true;
268}
269
270static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
271{
272 if (!xen_batched_set_pte(ptep, pteval)) {
                /*
                 * Not inside a lazy-MMU batch: issue the update as a
                 * single hypercall instead of letting Xen trap and
                 * emulate a native pte write.
                 */
                struct mmu_update u;
278
279 u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
280 u.val = pte_val_ma(pteval);
281 HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
282 }
283}
284
285static void xen_set_pte(pte_t *ptep, pte_t pteval)
286{
287 trace_xen_mmu_set_pte(ptep, pteval);
288 __xen_set_pte(ptep, pteval);
289}
290
291pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
292 unsigned long addr, pte_t *ptep)
293{
294
295 trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
296 return *ptep;
297}
298
299void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
300 pte_t *ptep, pte_t pte)
301{
302 struct mmu_update u;
303
304 trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
305 xen_mc_batch();
306
307 u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
308 u.val = pte_val_ma(pte);
309 xen_extend_mmu_update(&u);
310
311 xen_mc_issue(PARAVIRT_LAZY_MMU);
312}

/* Convert a machine-frame (mfn) based pte value into a pfn based one. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
316{
317 if (val & _PAGE_PRESENT) {
318 unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT;
319 unsigned long pfn = mfn_to_pfn(mfn);
320
321 pteval_t flags = val & PTE_FLAGS_MASK;
322 if (unlikely(pfn == ~0))
323 val = flags & ~_PAGE_PRESENT;
324 else
325 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
326 }
327
328 return val;
329}
330
331static pteval_t pte_pfn_to_mfn(pteval_t val)
332{
333 if (val & _PAGE_PRESENT) {
334 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
335 pteval_t flags = val & PTE_FLAGS_MASK;
336 unsigned long mfn;
337
338 mfn = __pfn_to_mfn(pfn);

                /*
                 * If there's no mfn for the pfn, just create an empty
                 * non-present pte.  Unfortunately this loses information
                 * about the original pfn, so pte_mfn_to_pfn is asymmetric.
                 */
                if (unlikely(mfn == INVALID_P2M_ENTRY)) {
347 mfn = 0;
348 flags = 0;
349 } else
350 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
351 val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
352 }
353
354 return val;
355}
356
357__visible pteval_t xen_pte_val(pte_t pte)
358{
359 pteval_t pteval = pte.pte;
360
361 return pte_mfn_to_pfn(pteval);
362}
363PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
364
365__visible pgdval_t xen_pgd_val(pgd_t pgd)
366{
367 return pte_mfn_to_pfn(pgd.pgd);
368}
369PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
370
371__visible pte_t xen_make_pte(pteval_t pte)
372{
373 pte = pte_pfn_to_mfn(pte);
374
375 return native_make_pte(pte);
376}
377PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
378
379__visible pgd_t xen_make_pgd(pgdval_t pgd)
380{
381 pgd = pte_pfn_to_mfn(pgd);
382 return native_make_pgd(pgd);
383}
384PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
385
386__visible pmdval_t xen_pmd_val(pmd_t pmd)
387{
388 return pte_mfn_to_pfn(pmd.pmd);
389}
390PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
391
392static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
393{
394 struct mmu_update u;
395
396 preempt_disable();
397
398 xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
402 u.val = pud_val_ma(val);
403 xen_extend_mmu_update(&u);
404
405 xen_mc_issue(PARAVIRT_LAZY_MMU);
406
407 preempt_enable();
408}
409
410static void xen_set_pud(pud_t *ptr, pud_t val)
411{
412 trace_xen_mmu_set_pud(ptr, val);

        /*
         * If the page is not pinned, we can just write the entry directly.
         */
        if (!xen_page_pinned(ptr)) {
417 *ptr = val;
418 return;
419 }
420
421 xen_set_pud_hyper(ptr, val);
422}
423
424__visible pmd_t xen_make_pmd(pmdval_t pmd)
425{
426 pmd = pte_pfn_to_mfn(pmd);
427 return native_make_pmd(pmd);
428}
429PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
430
431__visible pudval_t xen_pud_val(pud_t pud)
432{
433 return pte_mfn_to_pfn(pud.pud);
434}
435PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
436
437__visible pud_t xen_make_pud(pudval_t pud)
438{
439 pud = pte_pfn_to_mfn(pud);
440
441 return native_make_pud(pud);
442}
443PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
444
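/*
 * On 64-bit PV a process pgd can have a companion "user" pgd (stored in
 * the pgd page's page->private by xen_pgd_alloc()) which Xen loads while
 * the vcpu runs in user mode.  Return the matching slot in that user
 * pgd, or NULL for kernel-only offsets or when no user pgd exists.
 */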
445static pgd_t *xen_get_user_pgd(pgd_t *pgd)
446{
447 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
448 unsigned offset = pgd - pgd_page;
449 pgd_t *user_ptr = NULL;
450
451 if (offset < pgd_index(USER_LIMIT)) {
452 struct page *page = virt_to_page(pgd_page);
453 user_ptr = (pgd_t *)page->private;
454 if (user_ptr)
455 user_ptr += offset;
456 }
457
458 return user_ptr;
459}
460
461static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
462{
463 struct mmu_update u;
464
465 u.ptr = virt_to_machine(ptr).maddr;
466 u.val = p4d_val_ma(val);
467 xen_extend_mmu_update(&u);
468}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a struct page.  This implies:
 *  1. the only existing page table is the kernel's, and
 *  2. it is always pinned, with no user page table attached to it.
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
478{
479 preempt_disable();
480
481 xen_mc_batch();
482
483 __xen_set_p4d_hyper(ptr, val);
484
485 xen_mc_issue(PARAVIRT_LAZY_MMU);
486
487 preempt_enable();
488}
489
490static void xen_set_p4d(p4d_t *ptr, p4d_t val)
491{
492 pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
493 pgd_t pgd_val;
494
495 trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

        /* If the page is not pinned, write both entries directly. */
        if (!xen_page_pinned(ptr)) {
500 *ptr = val;
501 if (user_ptr) {
502 WARN_ON(xen_page_pinned(user_ptr));
503 pgd_val.pgd = p4d_val_ma(val);
504 *user_ptr = pgd_val;
505 }
506 return;
507 }

        /* Pinned: batch the kernel and user page table updates together. */
        xen_mc_batch();
512
513 __xen_set_p4d_hyper(ptr, val);
514 if (user_ptr)
515 __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
516
517 xen_mc_issue(PARAVIRT_LAZY_MMU);
518}
519
520#if CONFIG_PGTABLE_LEVELS >= 5
521__visible p4dval_t xen_p4d_val(p4d_t p4d)
522{
523 return pte_mfn_to_pfn(p4d.p4d);
524}
525PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val);
526
527__visible p4d_t xen_make_p4d(p4dval_t p4d)
528{
529 p4d = pte_pfn_to_mfn(p4d);
530
531 return native_make_p4d(p4d);
532}
533PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
534#endif
535
536static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
537 void (*func)(struct mm_struct *mm, struct page *,
538 enum pt_level),
539 bool last, unsigned long limit)
540{
541 int i, nr;
542
543 nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
544 for (i = 0; i < nr; i++) {
545 if (!pmd_none(pmd[i]))
546 (*func)(mm, pmd_page(pmd[i]), PT_PTE);
547 }
548}
549
550static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
551 void (*func)(struct mm_struct *mm, struct page *,
552 enum pt_level),
553 bool last, unsigned long limit)
554{
555 int i, nr;
556
557 nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
558 for (i = 0; i < nr; i++) {
559 pmd_t *pmd;
560
561 if (pud_none(pud[i]))
562 continue;
563
564 pmd = pmd_offset(&pud[i], 0);
565 if (PTRS_PER_PMD > 1)
566 (*func)(mm, virt_to_page(pmd), PT_PMD);
567 xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
568 }
569}
570
571static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
572 void (*func)(struct mm_struct *mm, struct page *,
573 enum pt_level),
574 bool last, unsigned long limit)
575{
576 pud_t *pud;
577
578
579 if (p4d_none(*p4d))
580 return;
581
582 pud = pud_offset(p4d, 0);
583 if (PTRS_PER_PUD > 1)
584 (*func)(mm, virt_to_page(pud), PT_PUD);
585 xen_pud_walk(mm, pud, func, last, limit);
586}
587
/*
 * (Yet another) page table walker.  This one is used when pinning or
 * unpinning a page table: it calls @func on every page making up the
 * page table, at every level, but only descends into pte pages that map
 * addresses below @limit (normally STACK_TOP_MAX, FIXADDR_TOP at boot).
 * The guard hole in the middle of the 64-bit address space, which holds
 * the Xen mappings, is skipped.
 */
static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                           void (*func)(struct mm_struct *mm, struct page *,
                                        enum pt_level),
                           unsigned long limit)
604{
605 int i, nr;
606 unsigned hole_low = 0, hole_high = 0;
607
608
609 limit--;
610 BUG_ON(limit >= FIXADDR_TOP);

        /* The limit is the last byte to be touched. */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);

        /*
         * 64-bit has a great big hole in the middle of the address
         * space, which contains the Xen mappings.  Skip the pgd slots
         * covering it.
         */
        hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
        hole_high = pgd_index(GUARD_HOLE_END_ADDR);
618
619 nr = pgd_index(limit) + 1;
620 for (i = 0; i < nr; i++) {
621 p4d_t *p4d;
622
623 if (i >= hole_low && i < hole_high)
624 continue;
625
626 if (pgd_none(pgd[i]))
627 continue;
628
629 p4d = p4d_offset(&pgd[i], 0);
630 xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
631 }

        /*
         * Do the top level last, so that the callbacks can use it as
         * a cue to do final things like TLB flushes.
         */
        (*func)(mm, virt_to_page(pgd), PT_PGD);
636}
637
638static void xen_pgd_walk(struct mm_struct *mm,
639 void (*func)(struct mm_struct *mm, struct page *,
640 enum pt_level),
641 unsigned long limit)
642{
643 __xen_pgd_walk(mm, mm->pgd, func, limit);
644}

/*
 * If we're using split pte locks, then take the page's lock and
 * return a pointer to it.  Otherwise return NULL.
 */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
649{
650 spinlock_t *ptl = NULL;
651
652#if USE_SPLIT_PTE_PTLOCKS
653 ptl = ptlock_ptr(page);
654 spin_lock_nest_lock(ptl, &mm->page_table_lock);
655#endif
656
657 return ptl;
658}
659
660static void xen_pte_unlock(void *v)
661{
662 spinlock_t *ptl = v;
663 spin_unlock(ptl);
664}
665
666static void xen_do_pin(unsigned level, unsigned long pfn)
667{
668 struct mmuext_op op;
669
670 op.cmd = level;
671 op.arg1.mfn = pfn_to_mfn(pfn);
672
673 xen_extend_mmuext_op(&op);
674}
675
676static void xen_pin_page(struct mm_struct *mm, struct page *page,
677 enum pt_level level)
678{
679 unsigned pgfl = TestSetPagePinned(page);
680
681 if (!pgfl) {
682 void *pt = lowmem_page_address(page);
683 unsigned long pfn = page_to_pfn(page);
684 struct multicall_space mcs = __xen_mc_entry(0);
685 spinlock_t *ptl;

                /*
                 * Hold the pte lock between making the page RO and
                 * actually pinning it, so that no other user can slip
                 * in a plain write against a page that is RO but not
                 * yet recognised by Xen as a page table.  With split
                 * pte locks only the PTE level is locked, and only for
                 * the duration of one multicall batch, which bounds
                 * the number of locks held at once.
                 */
                ptl = NULL;
                if (level == PT_PTE)
                        ptl = xen_pte_lock(page, mm);
710
711 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
712 pfn_pte(pfn, PAGE_KERNEL_RO),
713 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
714
715 if (ptl) {
716 xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
717
718
719
720 xen_mc_callback(xen_pte_unlock, ptl);
721 }
722 }
723}

/*
 * This is called just after a mm has been created, but it has not
 * been used yet.  We need to make sure that its page table is all
 * read-only, and can be pinned.
 */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
729{
730 pgd_t *user_pgd = xen_get_user_pgd(pgd);
731
732 trace_xen_mmu_pgd_pin(mm, pgd);
733
734 xen_mc_batch();
735
736 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);
737
738 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
739
740 if (user_pgd) {
741 xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
742 xen_do_pin(MMUEXT_PIN_L4_TABLE,
743 PFN_DOWN(__pa(user_pgd)));
744 }
745
746 xen_mc_issue(0);
747}
748
749static void xen_pgd_pin(struct mm_struct *mm)
750{
751 __xen_pgd_pin(mm, mm->pgd);
752}

/*
 * On save, we need to pin all page tables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and
 * pin them (unpinned pgds are not currently in use, probably because
 * the process is under construction or destruction).
 */
void xen_mm_pin_all(void)
765{
766 struct page *page;
767
768 spin_lock(&pgd_lock);
769
770 list_for_each_entry(page, &pgd_list, lru) {
771 if (!PagePinned(page)) {
772 __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
773 SetPageSavePinned(page);
774 }
775 }
776
777 spin_unlock(&pgd_lock);
778}
779
780static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
781 enum pt_level level)
782{
783 SetPagePinned(page);
784}

/*
 * The init_mm page table is really pinned as soon as it is created,
 * but that is before we have struct pages to record it.  Do the
 * book-keeping now, once struct pages for allocated pages are
 * initialized (this runs after memblock_free_all()).
 */
static void __init xen_after_bootmem(void)
793{
794 static_branch_enable(&xen_struct_pages_ready);
795#ifdef CONFIG_X86_VSYSCALL_EMULATION
796 SetPagePinned(virt_to_page(level3_user_vsyscall));
797#endif
798 xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
799}
800
801static void xen_unpin_page(struct mm_struct *mm, struct page *page,
802 enum pt_level level)
803{
804 unsigned pgfl = TestClearPagePinned(page);
805
806 if (pgfl) {
807 void *pt = lowmem_page_address(page);
808 unsigned long pfn = page_to_pfn(page);
809 spinlock_t *ptl = NULL;
810 struct multicall_space mcs;

                /*
                 * Converse of xen_pin_page(): with split pte locks, take
                 * the pte lock and unpin the page *before* making it
                 * writable again, deferring the unlock until the batch
                 * has completed.
                 */
                if (level == PT_PTE) {
820 ptl = xen_pte_lock(page, mm);
821
822 if (ptl)
823 xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
824 }
825
826 mcs = __xen_mc_entry(0);
827
828 MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
829 pfn_pte(pfn, PAGE_KERNEL),
830 level == PT_PGD ? UVMF_TLB_FLUSH : 0);
831
832 if (ptl) {
833
834 xen_mc_callback(xen_pte_unlock, ptl);
835 }
836 }
837}
838
839
840static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
841{
842 pgd_t *user_pgd = xen_get_user_pgd(pgd);
843
844 trace_xen_mmu_pgd_unpin(mm, pgd);
845
846 xen_mc_batch();
847
848 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
849
850 if (user_pgd) {
851 xen_do_pin(MMUEXT_UNPIN_TABLE,
852 PFN_DOWN(__pa(user_pgd)));
853 xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
854 }
855
856 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
857
858 xen_mc_issue(0);
859}
860
861static void xen_pgd_unpin(struct mm_struct *mm)
862{
863 __xen_pgd_unpin(mm, mm->pgd);
864}

/*
 * On resume, undo the pinning that xen_mm_pin_all() did on save for
 * pgds that were not pinned beforehand.
 */
void xen_mm_unpin_all(void)
871{
872 struct page *page;
873
874 spin_lock(&pgd_lock);
875
876 list_for_each_entry(page, &pgd_list, lru) {
877 if (PageSavePinned(page)) {
878 BUG_ON(!PagePinned(page));
879 __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
880 ClearPageSavePinned(page);
881 }
882 }
883
884 spin_unlock(&pgd_lock);
885}
886
887static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
888{
889 spin_lock(&next->page_table_lock);
890 xen_pgd_pin(next);
891 spin_unlock(&next->page_table_lock);
892}
893
894static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
895{
896 spin_lock(&mm->page_table_lock);
897 xen_pgd_pin(mm);
898 spin_unlock(&mm->page_table_lock);
899}
900
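/*
 * Called on a CPU that may still reference the mm being torn down:
 * switch away from it if it is the loaded mm, and if the vcpu's last
 * committed cr3 still points at this pgd, flush the pending multicall
 * batch so the switch actually reaches the hypervisor.
 */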
901static void drop_mm_ref_this_cpu(void *info)
902{
903 struct mm_struct *mm = info;
904
905 if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm)
906 leave_mm(smp_processor_id());
907
908
909
910
911
912 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
913 xen_mc_flush();
914}
915
916#ifdef CONFIG_SMP
/*
 * Another cpu may still have their %cr3 pointing at the pagetable, so
 * we need to repoint it somewhere else before we can unpin it.
 */
static void xen_drop_mm_ref(struct mm_struct *mm)
922{
923 cpumask_var_t mask;
924 unsigned cpu;
925
926 drop_mm_ref_this_cpu(mm);
927
928
929 if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
930 for_each_online_cpu(cpu) {
931 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
932 continue;
933 smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
934 }
935 return;
936 }

        /*
         * It's possible that a vcpu may have a stale reference to our
         * cr3, because it is in lazy mode and has not yet flushed its
         * set of pending hypercalls.  In that case its actual current
         * cr3 still points at this pgd, so cross-call it and force a
         * flush.
         */
        cpumask_clear(mask);
946 for_each_online_cpu(cpu) {
947 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
948 cpumask_set_cpu(cpu, mask);
949 }
950
951 smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1);
952 free_cpumask_var(mask);
953}
954#else
955static void xen_drop_mm_ref(struct mm_struct *mm)
956{
957 drop_mm_ref_this_cpu(mm);
958}
959#endif

/*
 * While a process runs, Xen pins its page tables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates to
 * it.  This means that all page table updates have to go via the
 * hypervisor, which is hideously expensive.
 *
 * Since we're pulling the page table down, we switch to use init_mm,
 * unpin the old process page table and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 */
static void xen_exit_mmap(struct mm_struct *mm)
976{
977 get_cpu();
978 xen_drop_mm_ref(mm);
979 put_cpu();
980
981 spin_lock(&mm->page_table_lock);

        /* pgd may not be pinned in the error exit path of execve */
        if (xen_page_pinned(mm->pgd))
985 xen_pgd_unpin(mm);
986
987 spin_unlock(&mm->page_table_lock);
988}
989
990static void xen_post_allocator_init(void);
991
992static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
993{
994 struct mmuext_op op;
995
996 op.cmd = cmd;
997 op.arg1.mfn = pfn_to_mfn(pfn);
998 if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
999 BUG();
1000}
1001
1002static void __init xen_cleanhighmap(unsigned long vaddr,
1003 unsigned long vaddr_end)
1004{
1005 unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
1006 pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);
1007
1008
1009
1010 for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
1011 pmd++, vaddr += PMD_SIZE) {
1012 if (pmd_none(*pmd))
1013 continue;
1014 if (vaddr < (unsigned long) _text || vaddr > kernel_end)
1015 set_pmd(pmd, __pmd(0));
1016 }
1017
1018
1019 xen_mc_flush();
1020}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
1026{
1027 void *vaddr = __va(paddr);
1028 void *vaddr_end = vaddr + size;
1029
1030 for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
1031 make_lowmem_page_readwrite(vaddr);
1032
1033 memblock_phys_free(paddr, size);
1034}
1035
1036static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
1037{
1038 unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;
1039
1040 if (unpin)
1041 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
1042 ClearPagePinned(virt_to_page(__va(pa)));
1043 xen_free_ro_pages(pa, PAGE_SIZE);
1044}
1045
1046static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
1047{
1048 unsigned long pa;
1049 pte_t *pte_tbl;
1050 int i;
1051
1052 if (pmd_large(*pmd)) {
1053 pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
1054 xen_free_ro_pages(pa, PMD_SIZE);
1055 return;
1056 }
1057
1058 pte_tbl = pte_offset_kernel(pmd, 0);
1059 for (i = 0; i < PTRS_PER_PTE; i++) {
1060 if (pte_none(pte_tbl[i]))
1061 continue;
1062 pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
1063 xen_free_ro_pages(pa, PAGE_SIZE);
1064 }
1065 set_pmd(pmd, __pmd(0));
1066 xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
1067}
1068
1069static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
1070{
1071 unsigned long pa;
1072 pmd_t *pmd_tbl;
1073 int i;
1074
1075 if (pud_large(*pud)) {
1076 pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
1077 xen_free_ro_pages(pa, PUD_SIZE);
1078 return;
1079 }
1080
1081 pmd_tbl = pmd_offset(pud, 0);
1082 for (i = 0; i < PTRS_PER_PMD; i++) {
1083 if (pmd_none(pmd_tbl[i]))
1084 continue;
1085 xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
1086 }
1087 set_pud(pud, __pud(0));
1088 xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
1089}
1090
1091static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
1092{
1093 unsigned long pa;
1094 pud_t *pud_tbl;
1095 int i;
1096
1097 if (p4d_large(*p4d)) {
1098 pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
1099 xen_free_ro_pages(pa, P4D_SIZE);
1100 return;
1101 }
1102
1103 pud_tbl = pud_offset(p4d, 0);
1104 for (i = 0; i < PTRS_PER_PUD; i++) {
1105 if (pud_none(pud_tbl[i]))
1106 continue;
1107 xen_cleanmfnmap_pud(pud_tbl + i, unpin);
1108 }
1109 set_p4d(p4d, __p4d(0));
1110 xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
1111}

/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P2M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
1118{
1119 pgd_t *pgd;
1120 p4d_t *p4d;
1121 bool unpin;
1122
1123 unpin = (vaddr == 2 * PGDIR_SIZE);
1124 vaddr &= PMD_MASK;
1125 pgd = pgd_offset_k(vaddr);
1126 p4d = p4d_offset(pgd, 0);
1127 if (!p4d_none(*p4d))
1128 xen_cleanmfnmap_p4d(p4d, unpin);
1129}
1130
1131static void __init xen_pagetable_p2m_free(void)
1132{
1133 unsigned long size;
1134 unsigned long addr;
1135
1136 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1137
1138
1139 if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
1140 return;
1141
1142
1143 memset((void *)xen_start_info->mfn_list, 0xff, size);
1144
1145 addr = xen_start_info->mfn_list;
1146
1147
1148
1149
1150
1151
1152
1153 size = roundup(size, PMD_SIZE);
1154
1155 if (addr >= __START_KERNEL_map) {
1156 xen_cleanhighmap(addr, addr + size);
1157 size = PAGE_ALIGN(xen_start_info->nr_pages *
1158 sizeof(unsigned long));
1159 memblock_free((void *)addr, size);
1160 } else {
1161 xen_cleanmfnmap(addr);
1162 }
1163}
1164
1165static void __init xen_pagetable_cleanhighmap(void)
1166{
1167 unsigned long size;
1168 unsigned long addr;
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179 addr = xen_start_info->pt_base;
1180 size = xen_start_info->nr_pt_frames * PAGE_SIZE;
1181
1182 xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
1183 xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
1184}
1185
1186static void __init xen_pagetable_p2m_setup(void)
1187{
1188 xen_vmalloc_p2m_tree();
1189
1190 xen_pagetable_p2m_free();
1191
1192 xen_pagetable_cleanhighmap();
1193
1194
1195 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1196}
1197
1198static void __init xen_pagetable_init(void)
1199{
        /*
         * From now on the majority of pte writes target pages that Xen
         * already tracks as page tables, so issuing them as hypercalls
         * (__xen_set_pte) is cheaper than having Xen trap and emulate
         * native writes.
         */
        pv_ops.mmu.set_pte = __xen_set_pte;
1206
1207 paging_init();
1208 xen_post_allocator_init();
1209
1210 xen_pagetable_p2m_setup();
1211
1212
1213 xen_build_mfn_list_list();
1214
1215
1216 xen_remap_memory();
1217 xen_setup_mfn_list_list();
1218}
1219
1220static noinstr void xen_write_cr2(unsigned long cr2)
1221{
1222 this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
1223}
1224
1225static noinline void xen_flush_tlb(void)
1226{
1227 struct mmuext_op *op;
1228 struct multicall_space mcs;
1229
1230 preempt_disable();
1231
1232 mcs = xen_mc_entry(sizeof(*op));
1233
1234 op = mcs.args;
1235 op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
1236 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1237
1238 xen_mc_issue(PARAVIRT_LAZY_MMU);
1239
1240 preempt_enable();
1241}
1242
1243static void xen_flush_tlb_one_user(unsigned long addr)
1244{
1245 struct mmuext_op *op;
1246 struct multicall_space mcs;
1247
1248 trace_xen_mmu_flush_tlb_one_user(addr);
1249
1250 preempt_disable();
1251
1252 mcs = xen_mc_entry(sizeof(*op));
1253 op = mcs.args;
1254 op->cmd = MMUEXT_INVLPG_LOCAL;
1255 op->arg1.linear_addr = addr & PAGE_MASK;
1256 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
1257
1258 xen_mc_issue(PARAVIRT_LAZY_MMU);
1259
1260 preempt_enable();
1261}
1262
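/*
 * Flush TLBs on a set of remote CPUs with a single hypercall: the online
 * subset of @cpus is handed to Xen as a vcpu bitmap, and a single-page
 * range is narrowed to an INVLPG-style flush.
 */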
1263static void xen_flush_tlb_multi(const struct cpumask *cpus,
1264 const struct flush_tlb_info *info)
1265{
1266 struct {
1267 struct mmuext_op op;
1268 DECLARE_BITMAP(mask, NR_CPUS);
1269 } *args;
1270 struct multicall_space mcs;
1271 const size_t mc_entry_size = sizeof(args->op) +
1272 sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
1273
1274 trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);
1275
1276 if (cpumask_empty(cpus))
1277 return;
1278
1279 mcs = xen_mc_entry(mc_entry_size);
1280 args = mcs.args;
1281 args->op.arg2.vcpumask = to_cpumask(args->mask);
1282
1283
1284 cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
1285
1286 args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
1287 if (info->end != TLB_FLUSH_ALL &&
1288 (info->end - info->start) <= PAGE_SIZE) {
1289 args->op.cmd = MMUEXT_INVLPG_MULTI;
1290 args->op.arg1.linear_addr = info->start;
1291 }
1292
1293 MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
1294
1295 xen_mc_issue(PARAVIRT_LAZY_MMU);
1296}
1297
1298static unsigned long xen_read_cr3(void)
1299{
1300 return this_cpu_read(xen_cr3);
1301}
1302
1303static void set_current_cr3(void *v)
1304{
1305 this_cpu_write(xen_current_cr3, (unsigned long)v);
1306}
1307
1308static void __xen_write_cr3(bool kernel, unsigned long cr3)
1309{
1310 struct mmuext_op op;
1311 unsigned long mfn;
1312
1313 trace_xen_mmu_write_cr3(kernel, cr3);
1314
1315 if (cr3)
1316 mfn = pfn_to_mfn(PFN_DOWN(cr3));
1317 else
1318 mfn = 0;
1319
1320 WARN_ON(mfn == 0 && kernel);
1321
1322 op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
1323 op.arg1.mfn = mfn;
1324
1325 xen_extend_mmuext_op(&op);
1326
1327 if (kernel) {
1328 this_cpu_write(xen_cr3, cr3);
1329
1330
1331
1332 xen_mc_callback(set_current_cr3, (void *)cr3);
1333 }
1334}
1335static void xen_write_cr3(unsigned long cr3)
1336{
1337 pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
1338
1339 BUG_ON(preemptible());
1340
1341 xen_mc_batch();
1342
1343
1344
1345 this_cpu_write(xen_cr3, cr3);
1346
1347 __xen_write_cr3(true, cr3);
1348
1349 if (user_pgd)
1350 __xen_write_cr3(false, __pa(user_pgd));
1351 else
1352 __xen_write_cr3(false, 0);
1353
1354 xen_mc_issue(PARAVIRT_LAZY_CPU);
1355}

/*
 * At boot Xen has already loaded a cr3 for us (the page tables built in
 * xen_setup_kernel_pagetable()).  When the generic code later installs
 * the real init_top_pgt through this early write_cr3 hook, there is no
 * user page table yet and we cannot allocate one, so only the kernel
 * base pointer is switched here.  Once the allocators are up,
 * xen_post_allocator_init() replaces this hook with xen_write_cr3(),
 * which also handles the user page table.
 */
static void __init xen_write_cr3_init(unsigned long cr3)
1378{
1379 BUG_ON(preemptible());
1380
1381 xen_mc_batch();
1382
1383
1384
1385 this_cpu_write(xen_cr3, cr3);
1386
1387 __xen_write_cr3(true, cr3);
1388
1389 xen_mc_issue(PARAVIRT_LAZY_CPU);
1390}
1391
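/*
 * Allocate the companion user pgd for a new kernel pgd and stash it in
 * the pgd page's page->private, where xen_get_user_pgd() finds it.  The
 * vsyscall fixmap is the only kernel mapping replicated into it here.
 */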
1392static int xen_pgd_alloc(struct mm_struct *mm)
1393{
1394 pgd_t *pgd = mm->pgd;
1395 struct page *page = virt_to_page(pgd);
1396 pgd_t *user_pgd;
1397 int ret = -ENOMEM;
1398
1399 BUG_ON(PagePinned(virt_to_page(pgd)));
1400 BUG_ON(page->private != 0);
1401
1402 user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1403 page->private = (unsigned long)user_pgd;
1404
1405 if (user_pgd != NULL) {
1406#ifdef CONFIG_X86_VSYSCALL_EMULATION
1407 user_pgd[pgd_index(VSYSCALL_ADDR)] =
1408 __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
1409#endif
1410 ret = 0;
1411 }
1412
1413 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
1414
1415 return ret;
1416}
1417
1418static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
1419{
1420 pgd_t *user_pgd = xen_get_user_pgd(pgd);
1421
1422 if (user_pgd)
1423 free_page((unsigned long)user_pgd);
1424}

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for them is unnecessary and expensive.  At
 * this point it is rarely possible to tell if a page is pinned, so
 * mostly write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
1441{
1442 if (unlikely(is_early_ioremap_ptep(ptep)))
1443 __xen_set_pte(ptep, pte);
1444 else
1445 native_set_pte(ptep, pte);
1446}
1447
1448__visible pte_t xen_make_pte_init(pteval_t pte)
1449{
1450 unsigned long pfn;

        /*
         * Pages belonging to the initial p2m list mapped outside the
         * default mapping of the kernel image are write protected here.
         */
        pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
1459 if (xen_start_info->mfn_list < __START_KERNEL_map &&
1460 pfn >= xen_start_info->first_p2m_pfn &&
1461 pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
1462 pte &= ~_PAGE_RW;
1463
1464 pte = pte_pfn_to_mfn(pte);
1465 return native_make_pte(pte);
1466}
1467PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
1472{
1473#ifdef CONFIG_FLATMEM
1474 BUG_ON(mem_map);
1475#endif
1476 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1477 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1478}
1479
1480
1481static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
1482{
1483#ifdef CONFIG_FLATMEM
1484 BUG_ON(mem_map);
1485#endif
1486 make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
1487}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
1492{
1493 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1494 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1495}
1496
1497static void __init xen_release_pmd_init(unsigned long pfn)
1498{
1499 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1500}
1501
1502static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
1503{
1504 struct multicall_space mcs;
1505 struct mmuext_op *op;
1506
1507 mcs = __xen_mc_entry(sizeof(*op));
1508 op = mcs.args;
1509 op->cmd = cmd;
1510 op->arg1.mfn = pfn_to_mfn(pfn);
1511
1512 MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
1513}
1514
1515static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
1516{
1517 struct multicall_space mcs;
1518 unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
1519
1520 mcs = __xen_mc_entry(0);
1521 MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
1522 pfn_pte(pfn, prot), 0);
1523}

/* This needs to make sure the new pte page is pinned iff its being
   attached to a pinned pagetable. */
static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
                                    unsigned level)
1529{
1530 bool pinned = xen_page_pinned(mm->pgd);
1531
1532 trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
1533
1534 if (pinned) {
1535 struct page *page = pfn_to_page(pfn);
1536
1537 pinned = false;
1538 if (static_branch_likely(&xen_struct_pages_ready)) {
1539 pinned = PagePinned(page);
1540 SetPagePinned(page);
1541 }
1542
1543 xen_mc_batch();
1544
1545 __set_pfn_prot(pfn, PAGE_KERNEL_RO);
1546
1547 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned)
1548 __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
1549
1550 xen_mc_issue(PARAVIRT_LAZY_MMU);
1551 }
1552}
1553
1554static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1555{
1556 xen_alloc_ptpage(mm, pfn, PT_PTE);
1557}
1558
1559static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1560{
1561 xen_alloc_ptpage(mm, pfn, PT_PMD);
1562}
1563
1564
1565static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
1566{
1567 struct page *page = pfn_to_page(pfn);
1568 bool pinned = PagePinned(page);
1569
1570 trace_xen_mmu_release_ptpage(pfn, level, pinned);
1571
1572 if (pinned) {
1573 xen_mc_batch();
1574
1575 if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
1576 __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
1577
1578 __set_pfn_prot(pfn, PAGE_KERNEL);
1579
1580 xen_mc_issue(PARAVIRT_LAZY_MMU);
1581
1582 ClearPagePinned(page);
1583 }
1584}
1585
1586static void xen_release_pte(unsigned long pfn)
1587{
1588 xen_release_ptpage(pfn, PT_PTE);
1589}
1590
1591static void xen_release_pmd(unsigned long pfn)
1592{
1593 xen_release_ptpage(pfn, PT_PMD);
1594}
1595
1596static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1597{
1598 xen_alloc_ptpage(mm, pfn, PT_PUD);
1599}
1600
1601static void xen_release_pud(unsigned long pfn)
1602{
1603 xen_release_ptpage(pfn, PT_PUD);
1604}
1605
1606
1607
1608
1609
1610static void * __init __ka(phys_addr_t paddr)
1611{
1612 return (void *)(paddr + __START_KERNEL_map);
1613}
1614
1615
1616static unsigned long __init m2p(phys_addr_t maddr)
1617{
1618 phys_addr_t paddr;
1619
1620 maddr &= XEN_PTE_MFN_MASK;
1621 paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
1622
1623 return paddr;
1624}
1625
1626
1627static void * __init m2v(phys_addr_t maddr)
1628{
1629 return __ka(m2p(maddr));
1630}
1631
1632
1633static void __init set_page_prot_flags(void *addr, pgprot_t prot,
1634 unsigned long flags)
1635{
1636 unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
1637 pte_t pte = pfn_pte(pfn, prot);
1638
1639 if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
1640 BUG();
1641}
1642static void __init set_page_prot(void *addr, pgprot_t prot)
1643{
1644 return set_page_prot_flags(addr, prot, UVMF_NONE);
1645}
1646
1647void __init xen_setup_machphys_mapping(void)
1648{
1649 struct xen_machphys_mapping mapping;
1650
1651 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
1652 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
1653 machine_to_phys_nr = mapping.max_mfn + 1;
1654 } else {
1655 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
1656 }
1657}
1658
1659static void __init convert_pfn_mfn(void *v)
1660{
1661 pte_t *pte = v;
1662 int i;
1663
1664
1665
1666 for (i = 0; i < PTRS_PER_PTE; i++)
1667 pte[i] = xen_make_pte(pte[i].pte);
1668}
1669static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
1670 unsigned long addr)
1671{
1672 if (*pt_base == PFN_DOWN(__pa(addr))) {
1673 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1674 clear_page((void *)addr);
1675 (*pt_base)++;
1676 }
1677 if (*pt_end == PFN_DOWN(__pa(addr))) {
1678 set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
1679 clear_page((void *)addr);
1680 (*pt_end)--;
1681 }
1682}

/*
 * Set up the initial kernel pagetable.
 *
 * We start with the page tables Xen provided (pointed to by @pgd),
 * graft the relevant pieces into the statically allocated
 * init_top_pgt/level3/level2 tables, make everything read-only, pin the
 * new top level and switch to it, and finally unpin and reclaim what is
 * left of the hypervisor-provided tables.
 */
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
1694{
1695 pud_t *l3;
1696 pmd_t *l2;
1697 unsigned long addr[3];
1698 unsigned long pt_base, pt_end;
1699 unsigned i;
1700
1701
1702
1703
1704
1705 if (xen_start_info->mfn_list < __START_KERNEL_map)
1706 max_pfn_mapped = xen_start_info->first_p2m_pfn;
1707 else
1708 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
1709
1710 pt_base = PFN_DOWN(__pa(xen_start_info->pt_base));
1711 pt_end = pt_base + xen_start_info->nr_pt_frames;
1712
1713
1714 init_top_pgt[0] = __pgd(0);
1715
1716
1717
1718
1719 convert_pfn_mfn(init_top_pgt);
1720
1721
1722 convert_pfn_mfn(level3_ident_pgt);
1723
1724
1725 convert_pfn_mfn(level3_kernel_pgt);
1726
1727
1728 convert_pfn_mfn(level2_fixmap_pgt);
1729
1730
1731 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
1732 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
1733
1734 addr[0] = (unsigned long)pgd;
1735 addr[1] = (unsigned long)l3;
1736 addr[2] = (unsigned long)l2;
1737
1738
1739
1740
1741
1742
1743 copy_page(level2_ident_pgt, l2);
1744
1745 copy_page(level2_kernel_pgt, l2);
1746
1747
1748
1749
1750
1751 if (__supported_pte_mask & _PAGE_NX) {
1752 for (i = 0; i < PTRS_PER_PMD; ++i) {
1753 if (pmd_none(level2_ident_pgt[i]))
1754 continue;
1755 level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX);
1756 }
1757 }
1758
1759
1760 i = pgd_index(xen_start_info->mfn_list);
1761 if (i && i < pgd_index(__START_KERNEL_map))
1762 init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i];
1763
1764
1765 set_page_prot(init_top_pgt, PAGE_KERNEL_RO);
1766 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
1767 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
1768 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
1769 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
1770 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
1771
1772 for (i = 0; i < FIXMAP_PMD_NUM; i++) {
1773 set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
1774 PAGE_KERNEL_RO);
1775 }
1776
1777
1778 pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
1779 PFN_DOWN(__pa_symbol(init_top_pgt)));
1780
1781
1782 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
1783
1784#ifdef CONFIG_X86_VSYSCALL_EMULATION
1785
1786 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
1787 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
1788 PFN_DOWN(__pa_symbol(level3_user_vsyscall)));
1789#endif
1790
1791
1792
1793
1794
1795 xen_mc_batch();
1796 __xen_write_cr3(true, __pa(init_top_pgt));
1797 xen_mc_issue(PARAVIRT_LAZY_CPU);
1798
1799
1800
1801
1802
1803
1804
1805 for (i = 0; i < ARRAY_SIZE(addr); i++)
1806 check_pt_base(&pt_base, &pt_end, addr[i]);
1807
1808
1809 xen_pt_base = PFN_PHYS(pt_base);
1810 xen_pt_size = (pt_end - pt_base) * PAGE_SIZE;
1811 memblock_reserve(xen_pt_base, xen_pt_size);
1812
1813
1814 xen_start_info = (struct start_info *)__va(__pa(xen_start_info));
1815}

/*
 * Read a value from a physical address.
 */
static unsigned long __init xen_read_phys_ulong(phys_addr_t addr)
1821{
1822 unsigned long *vaddr;
1823 unsigned long val;
1824
1825 vaddr = early_memremap_ro(addr, sizeof(val));
1826 val = *vaddr;
1827 early_memunmap(vaddr, sizeof(val));
1828 return val;
1829}

/*
 * Translate a virtual address to a physical one without relying on mapped
 * page tables.  Don't rely on big pages being aligned in (guest) physical
 * space!
 */
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
1837{
1838 phys_addr_t pa;
1839 pgd_t pgd;
1840 pud_t pud;
1841 pmd_t pmd;
1842 pte_t pte;
1843
1844 pa = read_cr3_pa();
1845 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
1846 sizeof(pgd)));
1847 if (!pgd_present(pgd))
1848 return 0;
1849
1850 pa = pgd_val(pgd) & PTE_PFN_MASK;
1851 pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) *
1852 sizeof(pud)));
1853 if (!pud_present(pud))
1854 return 0;
1855 pa = pud_val(pud) & PTE_PFN_MASK;
1856 if (pud_large(pud))
1857 return pa + (vaddr & ~PUD_MASK);
1858
1859 pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) *
1860 sizeof(pmd)));
1861 if (!pmd_present(pmd))
1862 return 0;
1863 pa = pmd_val(pmd) & PTE_PFN_MASK;
1864 if (pmd_large(pmd))
1865 return pa + (vaddr & ~PMD_MASK);
1866
1867 pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) *
1868 sizeof(pte)));
1869 if (!pte_present(pte))
1870 return 0;
1871 pa = pte_pfn(pte) << PAGE_SHIFT;
1872
1873 return pa | (vaddr & ~PAGE_MASK);
1874}

/*
 * Find a new area for the hypervisor supplied p2m list and relocate the
 * p2m to it.
 */
void __init xen_relocate_p2m(void)
1881{
1882 phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
1883 unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
1884 int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
1885 pte_t *pt;
1886 pmd_t *pmd;
1887 pud_t *pud;
1888 pgd_t *pgd;
1889 unsigned long *new_p2m;
1890
1891 size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
1892 n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
1893 n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
1894 n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
1895 n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
1896 n_frames = n_pte + n_pt + n_pmd + n_pud;
1897
1898 new_area = xen_find_free_area(PFN_PHYS(n_frames));
1899 if (!new_area) {
1900 xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n");
1901 BUG();
1902 }

        /*
         * Set up page tables that map the new p2m list at virtual address
         * 2 * PGDIR_SIZE (pgd slot 2 onwards, matching new_p2m below),
         * building the pud/pmd/pte pages out of the front of the newly
         * found area and pointing the remaining frames at the list itself.
         */
        pud_phys = new_area;
1913 pmd_phys = pud_phys + PFN_PHYS(n_pud);
1914 pt_phys = pmd_phys + PFN_PHYS(n_pmd);
1915 p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
1916
1917 pgd = __va(read_cr3_pa());
1918 new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
1919 for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
1920 pud = early_memremap(pud_phys, PAGE_SIZE);
1921 clear_page(pud);
1922 for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
1923 idx_pmd++) {
1924 pmd = early_memremap(pmd_phys, PAGE_SIZE);
1925 clear_page(pmd);
1926 for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
1927 idx_pt++) {
1928 pt = early_memremap(pt_phys, PAGE_SIZE);
1929 clear_page(pt);
1930 for (idx_pte = 0;
1931 idx_pte < min(n_pte, PTRS_PER_PTE);
1932 idx_pte++) {
1933 pt[idx_pte] = pfn_pte(p2m_pfn,
1934 PAGE_KERNEL);
1935 p2m_pfn++;
1936 }
1937 n_pte -= PTRS_PER_PTE;
1938 early_memunmap(pt, PAGE_SIZE);
1939 make_lowmem_page_readonly(__va(pt_phys));
1940 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
1941 PFN_DOWN(pt_phys));
1942 pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
1943 pt_phys += PAGE_SIZE;
1944 }
1945 n_pt -= PTRS_PER_PMD;
1946 early_memunmap(pmd, PAGE_SIZE);
1947 make_lowmem_page_readonly(__va(pmd_phys));
1948 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
1949 PFN_DOWN(pmd_phys));
1950 pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
1951 pmd_phys += PAGE_SIZE;
1952 }
1953 n_pmd -= PTRS_PER_PUD;
1954 early_memunmap(pud, PAGE_SIZE);
1955 make_lowmem_page_readonly(__va(pud_phys));
1956 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
1957 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
1958 pud_phys += PAGE_SIZE;
1959 }
1960
1961
1962 memcpy(new_p2m, xen_p2m_addr, size);
1963 xen_p2m_addr = new_p2m;
1964
1965
1966 p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
1967 BUG_ON(!p2m_pfn);
1968 p2m_pfn_end = p2m_pfn + PFN_DOWN(size);
1969
1970 if (xen_start_info->mfn_list < __START_KERNEL_map) {
1971 pfn = xen_start_info->first_p2m_pfn;
1972 pfn_end = xen_start_info->first_p2m_pfn +
1973 xen_start_info->nr_p2m_frames;
1974 set_pgd(pgd + 1, __pgd(0));
1975 } else {
1976 pfn = p2m_pfn;
1977 pfn_end = p2m_pfn_end;
1978 }
1979
1980 memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn));
1981 while (pfn < pfn_end) {
1982 if (pfn == p2m_pfn) {
1983 pfn = p2m_pfn_end;
1984 continue;
1985 }
1986 make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
1987 pfn++;
1988 }
1989
1990 xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
1991 xen_start_info->first_p2m_pfn = PFN_DOWN(new_area);
1992 xen_start_info->nr_p2m_frames = n_frames;
1993}
1994
1995void __init xen_reserve_special_pages(void)
1996{
1997 phys_addr_t paddr;
1998
1999 memblock_reserve(__pa(xen_start_info), PAGE_SIZE);
2000 if (xen_start_info->store_mfn) {
2001 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn));
2002 memblock_reserve(paddr, PAGE_SIZE);
2003 }
2004 if (!xen_initial_domain()) {
2005 paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn));
2006 memblock_reserve(paddr, PAGE_SIZE);
2007 }
2008}
2009
2010void __init xen_pt_check_e820(void)
2011{
2012 if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) {
2013 xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n");
2014 BUG();
2015 }
2016}
2017
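/* A harmless page used to back fixmap slots (APIC, IO-APIC) that a PV guest
 * must not touch; it is filled with 0xff in xen_init_mmu_ops(). */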
2018static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
2019
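/*
 * pv_ops.mmu.set_fixmap: most fixmap slots take a machine frame directly
 * (mfn_pte), while the boot-time ioremap and vsyscall slots are built from
 * pseudo-physical frames, and the (IO-)APIC slots are redirected to
 * dummy_mapping since a PV guest has no usable APIC mapping.
 */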
2020static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
2021{
2022 pte_t pte;
2023 unsigned long vaddr;
2024
2025 phys >>= PAGE_SHIFT;
2026
2027 switch (idx) {
2028 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2029#ifdef CONFIG_X86_VSYSCALL_EMULATION
2030 case VSYSCALL_PAGE:
2031#endif
2032
2033 pte = pfn_pte(phys, prot);
2034 break;
2035
2036#ifdef CONFIG_X86_LOCAL_APIC
2037 case FIX_APIC_BASE:
2038 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2039 break;
2040#endif
2041
2042#ifdef CONFIG_X86_IO_APIC
2043 case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END:
2044
2045
2046
2047
2048 pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL);
2049 break;
2050#endif
2051
2052 case FIX_PARAVIRT_BOOTMAP:
2053
2054
2055 pte = mfn_pte(phys, prot);
2056 break;
2057
2058 default:
2059
2060 pte = mfn_pte(phys, prot);
2061 break;
2062 }
2063
2064 vaddr = __fix_to_virt(idx);
2065 if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG))
2066 BUG();

#ifdef CONFIG_X86_VSYSCALL_EMULATION
        /* Replicate changes to map the vsyscall page into the user
           pagetable vsyscall mapping. */
        if (idx == VSYSCALL_PAGE)
                set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
2073#endif
2074}
2075
2076static void __init xen_post_allocator_init(void)
2077{
2078 pv_ops.mmu.set_pte = xen_set_pte;
2079 pv_ops.mmu.set_pmd = xen_set_pmd;
2080 pv_ops.mmu.set_pud = xen_set_pud;
2081 pv_ops.mmu.set_p4d = xen_set_p4d;
2082
2083
2084
2085 pv_ops.mmu.alloc_pte = xen_alloc_pte;
2086 pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2087 pv_ops.mmu.release_pte = xen_release_pte;
2088 pv_ops.mmu.release_pmd = xen_release_pmd;
2089 pv_ops.mmu.alloc_pud = xen_alloc_pud;
2090 pv_ops.mmu.release_pud = xen_release_pud;
2091 pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2092
2093 pv_ops.mmu.write_cr3 = &xen_write_cr3;
2094}
2095
2096static void xen_leave_lazy_mmu(void)
2097{
2098 preempt_disable();
2099 xen_mc_flush();
2100 paravirt_leave_lazy_mmu();
2101 preempt_enable();
2102}
2103
2104static const typeof(pv_ops) xen_mmu_ops __initconst = {
2105 .mmu = {
2106 .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
2107 .write_cr2 = xen_write_cr2,
2108
2109 .read_cr3 = xen_read_cr3,
2110 .write_cr3 = xen_write_cr3_init,
2111
2112 .flush_tlb_user = xen_flush_tlb,
2113 .flush_tlb_kernel = xen_flush_tlb,
2114 .flush_tlb_one_user = xen_flush_tlb_one_user,
2115 .flush_tlb_multi = xen_flush_tlb_multi,
2116 .tlb_remove_table = tlb_remove_table,
2117
2118 .pgd_alloc = xen_pgd_alloc,
2119 .pgd_free = xen_pgd_free,
2120
2121 .alloc_pte = xen_alloc_pte_init,
2122 .release_pte = xen_release_pte_init,
2123 .alloc_pmd = xen_alloc_pmd_init,
2124 .release_pmd = xen_release_pmd_init,
2125
2126 .set_pte = xen_set_pte_init,
2127 .set_pmd = xen_set_pmd_hyper,
2128
2129 .ptep_modify_prot_start = xen_ptep_modify_prot_start,
2130 .ptep_modify_prot_commit = xen_ptep_modify_prot_commit,
2131
2132 .pte_val = PV_CALLEE_SAVE(xen_pte_val),
2133 .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
2134
2135 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
2136 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
2137
2138 .set_pud = xen_set_pud_hyper,
2139
2140 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
2141 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
2142
2143 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
2144 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
2145 .set_p4d = xen_set_p4d_hyper,
2146
2147 .alloc_pud = xen_alloc_pmd_init,
2148 .release_pud = xen_release_pmd_init,
2149
2150#if CONFIG_PGTABLE_LEVELS >= 5
2151 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
2152 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
2153#endif
2154
2155 .activate_mm = xen_activate_mm,
2156 .dup_mmap = xen_dup_mmap,
2157 .exit_mmap = xen_exit_mmap,
2158
2159 .lazy_mode = {
2160 .enter = paravirt_enter_lazy_mmu,
2161 .leave = xen_leave_lazy_mmu,
2162 .flush = paravirt_flush_lazy_mmu,
2163 },
2164
2165 .set_fixmap = xen_set_fixmap,
2166 },
2167};
2168
2169void __init xen_init_mmu_ops(void)
2170{
2171 x86_init.paging.pagetable_init = xen_pagetable_init;
2172 x86_init.hyper.init_after_bootmem = xen_after_bootmem;
2173
2174 pv_ops.mmu = xen_mmu_ops.mmu;
2175
2176 memset(dummy_mapping, 0xff, PAGE_SIZE);
2177}
2178
2179
2180#define MAX_CONTIG_ORDER 9
2181static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
2182
2183#define VOID_PTE (mfn_pte(0, __pgprot(0)))
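/*
 * Unmap 2^order pages starting at @vaddr, recording the machine frames
 * that backed them in @in_frames (if given) and the pfns in @out_frames
 * (if given), and mark the pfns as INVALID_P2M_ENTRY so the frames can
 * be handed back to Xen.
 */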
2184static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
2185 unsigned long *in_frames,
2186 unsigned long *out_frames)
2187{
2188 int i;
2189 struct multicall_space mcs;
2190
2191 xen_mc_batch();
2192 for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
2193 mcs = __xen_mc_entry(0);
2194
2195 if (in_frames)
2196 in_frames[i] = virt_to_mfn(vaddr);
2197
2198 MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
2199 __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
2200
2201 if (out_frames)
2202 out_frames[i] = virt_to_pfn(vaddr);
2203 }
2204 xen_mc_issue(0);
2205}
2206
2207
2208
2209
2210
2211
2212static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
2213 unsigned long *mfns,
2214 unsigned long first_mfn)
2215{
2216 unsigned i, limit;
2217 unsigned long mfn;
2218
2219 xen_mc_batch();
2220
2221 limit = 1u << order;
2222 for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
2223 struct multicall_space mcs;
2224 unsigned flags;
2225
2226 mcs = __xen_mc_entry(0);
2227 if (mfns)
2228 mfn = mfns[i];
2229 else
2230 mfn = first_mfn + i;
2231
2232 if (i < (limit - 1))
2233 flags = 0;
2234 else {
2235 if (order == 0)
2236 flags = UVMF_INVLPG | UVMF_ALL;
2237 else
2238 flags = UVMF_TLB_FLUSH | UVMF_ALL;
2239 }
2240
2241 MULTI_update_va_mapping(mcs.mc, vaddr,
2242 mfn_pte(mfn, PAGE_KERNEL), flags);
2243
2244 set_phys_to_machine(virt_to_pfn(vaddr), mfn);
2245 }
2246
2247 xen_mc_issue(0);
2248}

/*
 * Exchange a batch of frames with the hypervisor: the @extents_in input
 * extents of order @order_in listed in @pfns_in are handed back, and
 * @extents_out extents of order @order_out are returned in @mfns_out,
 * optionally restricted to @address_bits wide machine addresses.
 * Returns 1 if every input extent was exchanged, 0 otherwise.
 */
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
                               unsigned long *pfns_in,
                               unsigned long extents_out,
                               unsigned int order_out,
                               unsigned long *mfns_out,
                               unsigned int address_bits)
2264{
2265 long rc;
2266 int success;
2267
2268 struct xen_memory_exchange exchange = {
2269 .in = {
2270 .nr_extents = extents_in,
2271 .extent_order = order_in,
2272 .extent_start = pfns_in,
2273 .domid = DOMID_SELF
2274 },
2275 .out = {
2276 .nr_extents = extents_out,
2277 .extent_order = order_out,
2278 .extent_start = mfns_out,
2279 .address_bits = address_bits,
2280 .domid = DOMID_SELF
2281 }
2282 };
2283
2284 BUG_ON(extents_in << order_in != extents_out << order_out);
2285
2286 rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
2287 success = (exchange.nr_exchanged == extents_in);
2288
2289 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
2290 BUG_ON(success && (rc != 0));
2291
2292 return success;
2293}
2294
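/*
 * Exchange the (possibly scattered) machine frames backing 2^order pages
 * at @pstart for one machine-contiguous extent below the @address_bits
 * limit, remapping the region in place.  Used to obtain DMA-able buffers
 * (e.g. by swiotlb-xen) on PV, where guest-contiguous does not imply
 * machine-contiguous.
 */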
2295int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
2296 unsigned int address_bits,
2297 dma_addr_t *dma_handle)
2298{
2299 unsigned long *in_frames = discontig_frames, out_frame;
2300 unsigned long flags;
2301 int success;
2302 unsigned long vstart = (unsigned long)phys_to_virt(pstart);

        /*
         * The discontig_frames scratch array only holds 1 << MAX_CONTIG_ORDER
         * entries, so that is the largest region we can exchange in one go.
         */
        if (unlikely(order > MAX_CONTIG_ORDER))
2311 return -ENOMEM;
2312
2313 memset((void *) vstart, 0, PAGE_SIZE << order);
2314
2315 spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Zap current PTEs, remembering MFNs. */
        xen_zap_pfn_range(vstart, order, in_frames, NULL);

        /* 2. Get a new contiguous memory extent. */
        out_frame = virt_to_pfn(vstart);
2322 success = xen_exchange_memory(1UL << order, 0, in_frames,
2323 1, order, &out_frame,
2324 address_bits);

        /* 3. Map the new extent in place of old pages. */
        if (success)
2328 xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
2329 else
2330 xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
2331
2332 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2333
2334 *dma_handle = virt_to_machine(vstart).maddr;
2335 return success ? 0 : -ENOMEM;
2336}
2337
2338void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
2339{
2340 unsigned long *out_frames = discontig_frames, in_frame;
2341 unsigned long flags;
2342 int success;
2343 unsigned long vstart;
2344
2345 if (unlikely(order > MAX_CONTIG_ORDER))
2346 return;
2347
2348 vstart = (unsigned long)phys_to_virt(pstart);
2349 memset((void *) vstart, 0, PAGE_SIZE << order);
2350
2351 spin_lock_irqsave(&xen_reservation_lock, flags);

        /* 1. Find start MFN of contiguous extent. */
        in_frame = virt_to_mfn(vstart);

        /* 2. Zap current PTEs. */
        xen_zap_pfn_range(vstart, order, NULL, out_frames);

        /* 3. Do the exchange for non-contiguous MFNs. */
        success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
2361 0, out_frames, 0);

        /* 4. Map new pages in place of old pages. */
        if (success)
2365 xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
2366 else
2367 xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
2368
2369 spin_unlock_irqrestore(&xen_reservation_lock, flags);
2370}
2371
2372static noinline void xen_flush_tlb_all(void)
2373{
2374 struct mmuext_op *op;
2375 struct multicall_space mcs;
2376
2377 preempt_disable();
2378
2379 mcs = xen_mc_entry(sizeof(*op));
2380
2381 op = mcs.args;
2382 op->cmd = MMUEXT_TLB_FLUSH_ALL;
2383 MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
2384
2385 xen_mc_issue(PARAVIRT_LAZY_MMU);
2386
2387 preempt_enable();
2388}
2389
2390#define REMAP_BATCH_SIZE 16
2391
2392struct remap_data {
2393 xen_pfn_t *pfn;
2394 bool contiguous;
2395 bool no_translate;
2396 pgprot_t prot;
2397 struct mmu_update *mmu_update;
2398};
2399
2400static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
2401{
2402 struct remap_data *rmd = data;
2403 pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

        /*
         * If we have a contiguous range, just update the pfn itself,
         * else update pointer to be "next pfn".
         */
        if (rmd->contiguous)
2410 (*rmd->pfn)++;
2411 else
2412 rmd->pfn++;
2413
2414 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
2415 rmd->mmu_update->ptr |= rmd->no_translate ?
2416 MMU_PT_UPDATE_NO_TRANSLATE :
2417 MMU_NORMAL_PT_UPDATE;
2418 rmd->mmu_update->val = pte_val_ma(pte);
2419 rmd->mmu_update++;
2420
2421 return 0;
2422}
2423
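/*
 * Map a run of foreign or device machine frames into @vma starting at
 * @addr, REMAP_BATCH_SIZE ptes per mmu_update hypercall.  With @err_ptr
 * the frames in @pfn are mapped individually and per-page errors are
 * reported; without it @pfn names the first frame of a contiguous range.
 * Returns the number of pages mapped, or a negative error.
 */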
2424int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
2425 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
2426 unsigned int domid, bool no_translate)
2427{
2428 int err = 0;
2429 struct remap_data rmd;
2430 struct mmu_update mmu_update[REMAP_BATCH_SIZE];
2431 unsigned long range;
2432 int mapped = 0;
2433
2434 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
2435
2436 rmd.pfn = pfn;
2437 rmd.prot = prot;

        /*
         * err_ptr tells us whether we are doing a contiguous mapping (a
         * single pfn incremented for every page) or a discontiguous one.
         */
        rmd.contiguous = !err_ptr;
2443 rmd.no_translate = no_translate;
2444
2445 while (nr) {
2446 int index = 0;
2447 int done = 0;
2448 int batch = min(REMAP_BATCH_SIZE, nr);
2449 int batch_left = batch;
2450
2451 range = (unsigned long)batch << PAGE_SHIFT;
2452
2453 rmd.mmu_update = mmu_update;
2454 err = apply_to_page_range(vma->vm_mm, addr, range,
2455 remap_area_pfn_pte_fn, &rmd);
2456 if (err)
2457 goto out;
2458
2459
2460
2461
2462
2463 do {
2464 int i;
2465
2466 err = HYPERVISOR_mmu_update(&mmu_update[index],
2467 batch_left, &done, domid);

                        /*
                         * @err_ptr may share the buffer with @pfn, so
                         * only clear entries after the corresponding
                         * chunk of @pfn has been consumed.
                         */
                        if (err_ptr) {
2475 for (i = index; i < index + done; i++)
2476 err_ptr[i] = 0;
2477 }
2478 if (err < 0) {
2479 if (!err_ptr)
2480 goto out;
2481 err_ptr[i] = err;
2482 done++;
2483 } else
2484 mapped += done;
2485 batch_left -= done;
2486 index += done;
2487 } while (batch_left);
2488
2489 nr -= batch;
2490 addr += range;
2491 if (err_ptr)
2492 err_ptr += batch;
2493 cond_resched();
2494 }
2495out:
2496
2497 xen_flush_tlb_all();
2498
2499 return err < 0 ? err : mapped;
2500}
2501EXPORT_SYMBOL_GPL(xen_remap_pfn);
2502
2503#ifdef CONFIG_KEXEC_CORE
2504phys_addr_t paddr_vmcoreinfo_note(void)
2505{
2506 if (xen_pv_domain())
2507 return virt_to_machine(vmcoreinfo_note).maddr;
2508 else
2509 return __pa(vmcoreinfo_note);
2510}
2511#endif
2512