1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151#include <linux/init.h>
152#include <linux/module.h>
153#include <linux/list.h>
154#include <linux/hash.h>
155#include <linux/sched.h>
156#include <linux/seq_file.h>
157
158#include <asm/cache.h>
159#include <asm/setup.h>
160
161#include <asm/xen/page.h>
162#include <asm/xen/hypercall.h>
163#include <asm/xen/hypervisor.h>
164#include <xen/balloon.h>
165#include <xen/grant_table.h>
166
167#include "multicalls.h"
168#include "xen-ops.h"
169
static void __init m2p_override_init(void);

/* Highest pfn covered by the p2m tree; set from the start-of-day mfn list. */
unsigned long xen_max_p2m_pfn __read_mostly;

/* Number of entries per page at each level of the three-level p2m tree. */
#define P2M_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **))

/* Largest pfn a fully populated three-level tree can describe. */
#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/*
 * Shared placeholder pages: a "missing" leaf (every entry
 * INVALID_P2M_ENTRY), a "missing" mid level (every entry pointing at
 * p2m_missing) plus its mfn shadow, and an "identity" leaf used for 1:1
 * mapped ranges.  All are allocated from brk in
 * xen_build_dynamic_phys_to_machine() / xen_build_mfn_list_list().
 */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);

/* The tree itself, plus the parallel mfn-based view used at save/restore. */
static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);

/* Brk space for the mid levels allocated while building the boot-time tree. */
RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

/* Extra brk space for identity-range mid levels.
 * NOTE(review): the "* 2 * 3" sizing is not derivable from this file —
 * confirm against the identity-range callers. */
RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);

/* Brk space for leaf pages populated early via early_alloc_p2m_middle().
 * NOTE(review): sizing (3 pages) is empirical — confirm it covers all
 * early callers. */
RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
204
205static inline unsigned p2m_top_index(unsigned long pfn)
206{
207 BUG_ON(pfn >= MAX_P2M_PFN);
208 return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
209}
210
211static inline unsigned p2m_mid_index(unsigned long pfn)
212{
213 return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
214}
215
216static inline unsigned p2m_index(unsigned long pfn)
217{
218 return pfn % P2M_PER_PAGE;
219}
220
221static void p2m_top_init(unsigned long ***top)
222{
223 unsigned i;
224
225 for (i = 0; i < P2M_TOP_PER_PAGE; i++)
226 top[i] = p2m_mid_missing;
227}
228
229static void p2m_top_mfn_init(unsigned long *top)
230{
231 unsigned i;
232
233 for (i = 0; i < P2M_TOP_PER_PAGE; i++)
234 top[i] = virt_to_mfn(p2m_mid_missing_mfn);
235}
236
237static void p2m_top_mfn_p_init(unsigned long **top)
238{
239 unsigned i;
240
241 for (i = 0; i < P2M_TOP_PER_PAGE; i++)
242 top[i] = p2m_mid_missing_mfn;
243}
244
245static void p2m_mid_init(unsigned long **mid)
246{
247 unsigned i;
248
249 for (i = 0; i < P2M_MID_PER_PAGE; i++)
250 mid[i] = p2m_missing;
251}
252
253static void p2m_mid_mfn_init(unsigned long *mid)
254{
255 unsigned i;
256
257 for (i = 0; i < P2M_MID_PER_PAGE; i++)
258 mid[i] = virt_to_mfn(p2m_missing);
259}
260
261static void p2m_init(unsigned long *p2m)
262{
263 unsigned i;
264
265 for (i = 0; i < P2M_MID_PER_PAGE; i++)
266 p2m[i] = INVALID_P2M_ENTRY;
267}
268
269
270
271
272
273
274
275
276
277
278
/*
 * Build (or rebuild) the parallel, mfn-based view of the p2m tree
 * (p2m_top_mfn / p2m_top_mfn_p, plus the shared p2m_mid_missing_mfn
 * page) that is published to the hypervisor via
 * pfn_to_mfn_frame_list_list for save/restore.
 *
 * Called at boot (structures allocated from brk) and again after
 * resume, when the structures exist but machine frame numbers changed.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn;

	/* Auto-translated guests keep no p2m — nothing to publish. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	/* First invocation: allocate the mfn-level structures from brk. */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(p2m_mid_missing_mfn);

		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Re-init after resume: the mfn of p2m_missing changed. */
		p2m_mid_mfn_init(p2m_mid_missing_mfn);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);
		unsigned long **mid;
		unsigned long *mid_mfn_p;

		mid = p2m_top[topidx];
		mid_mfn_p = p2m_top_mfn_p[topidx];

		/*
		 * A wholly missing mid level: point the top slot at the
		 * shared "missing" mfn page and skip the covered range.
		 * This can only be seen at a mid-level boundary, hence
		 * the BUG_ON(mididx).
		 */
		if (mid == p2m_mid_missing) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			/*
			 * The mid level exists but its mfn shadow does
			 * not yet: allocate one now.
			 */
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
	}
}
339
/*
 * Publish the mfn list-list built by xen_build_mfn_list_list() to the
 * hypervisor through the shared info page.  Must not run before the
 * real shared info page is mapped.
 */
void xen_setup_mfn_list_list(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}
351
352
/*
 * Set up the initial p2m tree from the flat mfn list handed over by the
 * domain builder (xen_start_info->mfn_list), allocating tree levels
 * from the brk reservation and carving the flat list into leaf pages.
 */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list;
	unsigned long max_pfn;
	unsigned long pfn;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	mfn_list = (unsigned long *)xen_start_info->mfn_list;
	max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	xen_max_p2m_pfn = max_pfn;

	/* Shared placeholders: "missing" leaf/mid and "identity" leaf. */
	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_missing);

	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_mid_init(p2m_mid_missing);

	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_top_init(p2m_top);

	p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_identity);

	/*
	 * The domain builder gives us a contiguous mfn array; point the
	 * tree's leaf level directly into it, one page at a time.
	 */
	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);

		if (p2m_top[topidx] == p2m_mid_missing) {
			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_init(mid);

			p2m_top[topidx] = mid;
		}

		/*
		 * If the final chunk does not fill a whole leaf page,
		 * the entries past nr_pages would be whatever the
		 * builder left there: explicitly invalidate the tail.
		 */
		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
			unsigned long p2midx;

			p2midx = max_pfn % P2M_PER_PAGE;
			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
		}
		p2m_top[topidx][mididx] = &mfn_list[pfn];
	}

	m2p_override_init();
}
411#ifdef CONFIG_X86_64
412#include <linux/bootmem.h>
/*
 * Relocate p2m leaf pages that still live inside the domain-builder
 * provided mfn_list area into a freshly bootmem-allocated array, so
 * that the original area can later be reclaimed.
 *
 * Returns: the virtual address of the new array; the original
 * xen_start_info->mfn_list if the bootmem allocation failed; or 0 if
 * the list is already inside the kernel direct map (nothing to do) or
 * on internal error.
 */
unsigned long __init xen_revector_p2m_tree(void)
{
	unsigned long va_start;
	unsigned long va_end;
	unsigned long pfn;
	unsigned long pfn_free = 0;
	unsigned long *mfn_list = NULL;
	unsigned long size;

	va_start = xen_start_info->mfn_list;
	/* We copy whole pages (copy_page below), so round the size up. */
	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
	va_end = va_start + size;

	/* Already inside the kernel mapping region: leave it alone. */
	if (va_start <= __START_KERNEL_map && va_start >= __PAGE_OFFSET)
		return 0;

	mfn_list = alloc_bootmem_align(size, PAGE_SIZE);
	if (!mfn_list) {
		pr_warn("Could not allocate space for a new P2M tree!\n");
		return xen_start_info->mfn_list;
	}
	/* Pre-fill with all-ones (== INVALID_P2M_ENTRY pattern). */
	memset(mfn_list, 0xFF, size);

	for (pfn = 0; pfn < ALIGN(MAX_DOMAIN_PAGES, P2M_PER_PAGE); pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx;
		unsigned long *mid_p;

		if (!p2m_top[topidx])
			continue;

		if (p2m_top[topidx] == p2m_mid_missing)
			continue;

		mididx = p2m_mid_index(pfn);
		mid_p = p2m_top[topidx][mididx];
		if (!mid_p)
			continue;
		/* Shared placeholder pages never need relocation. */
		if ((mid_p == p2m_missing) || (mid_p == p2m_identity))
			continue;

		if ((unsigned long)mid_p == INVALID_P2M_ENTRY)
			continue;

		/* Copy only leaves that point into the old builder area. */
		if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) {
			unsigned long *new;

			if (pfn_free > (size / sizeof(unsigned long))) {
				WARN(1, "Only allocated for %ld pages, but we want %ld!\n",
				     size / sizeof(unsigned long), pfn_free);
				return 0;
			}
			new = &mfn_list[pfn_free];

			copy_page(new, mid_p);
			p2m_top[topidx][mididx] = &mfn_list[pfn_free];
			p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]);

			pfn_free += P2M_PER_PAGE;

		}

	}
	return (unsigned long)mfn_list;

}
484#else
/* Non-x86_64 stub: the p2m stays where the domain builder put it. */
unsigned long __init xen_revector_p2m_tree(void)
{
	return 0;
}
489#endif
/*
 * Look up the machine frame for @pfn.
 *
 * Returns INVALID_P2M_ENTRY for pfns beyond the tree,
 * IDENTITY_FRAME(pfn) for ranges sharing the identity leaf, otherwise
 * the stored entry (which may itself be INVALID_P2M_ENTRY).
 */
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(pfn >= MAX_P2M_PFN))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/*
	 * The shared identity leaf stores INVALID_P2M_ENTRY like the
	 * missing leaf does (see p2m_init() on both), so it must be
	 * recognised by pointer identity and reported as
	 * IDENTITY_FRAME(pfn) instead of reading the entry.
	 */
	if (p2m_top[topidx][mididx] == p2m_identity)
		return IDENTITY_FRAME(pfn);

	return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
512
513static void *alloc_p2m_page(void)
514{
515 return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
516}
517
/* Counterpart of alloc_p2m_page(). */
static void free_p2m_page(void *p)
{
	free_page((unsigned long)p);
}
522
523
524
525
526
527
528
529
530static bool alloc_p2m(unsigned long pfn)
531{
532 unsigned topidx, mididx;
533 unsigned long ***top_p, **mid;
534 unsigned long *top_mfn_p, *mid_mfn;
535
536 topidx = p2m_top_index(pfn);
537 mididx = p2m_mid_index(pfn);
538
539 top_p = &p2m_top[topidx];
540 mid = *top_p;
541
542 if (mid == p2m_mid_missing) {
543
544 mid = alloc_p2m_page();
545 if (!mid)
546 return false;
547
548 p2m_mid_init(mid);
549
550 if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
551 free_p2m_page(mid);
552 }
553
554 top_mfn_p = &p2m_top_mfn[topidx];
555 mid_mfn = p2m_top_mfn_p[topidx];
556
557 BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
558
559 if (mid_mfn == p2m_mid_missing_mfn) {
560
561 unsigned long missing_mfn;
562 unsigned long mid_mfn_mfn;
563
564 mid_mfn = alloc_p2m_page();
565 if (!mid_mfn)
566 return false;
567
568 p2m_mid_mfn_init(mid_mfn);
569
570 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
571 mid_mfn_mfn = virt_to_mfn(mid_mfn);
572 if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
573 free_p2m_page(mid_mfn);
574 else
575 p2m_top_mfn_p[topidx] = mid_mfn;
576 }
577
578 if (p2m_top[topidx][mididx] == p2m_identity ||
579 p2m_top[topidx][mididx] == p2m_missing) {
580
581 unsigned long *p2m;
582 unsigned long *p2m_orig = p2m_top[topidx][mididx];
583
584 p2m = alloc_p2m_page();
585 if (!p2m)
586 return false;
587
588 p2m_init(p2m);
589
590 if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
591 free_p2m_page(p2m);
592 else
593 mid_mfn[mididx] = virt_to_mfn(p2m);
594 }
595
596 return true;
597}
598
/*
 * Boot-time (brk) allocation of a real leaf p2m page for @pfn,
 * replacing the shared "missing" leaf and updating the mfn shadow.
 *
 * @check_boundary: when true, skip pfns that sit exactly on a leaf
 * boundary (idx == 0) — such ranges are handled by the caller.
 *
 * Returns true if a new leaf was installed, false if nothing was done.
 */
static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
{
	unsigned topidx, mididx, idx;
	unsigned long *p2m;
	unsigned long *mid_mfn_p;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/* Pfn aligned to a leaf boundary: nothing for us to do here. */
	if (!idx && check_boundary)
		return false;

	WARN(p2m_top[topidx][mididx] == p2m_identity,
		"P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
		topidx, mididx);

	/*
	 * Only replace the shared "missing" leaf; anything else has
	 * already been allocated (or is the identity page, warned above).
	 */
	if (p2m_top[topidx][mididx] != p2m_missing)
		return false;

	/* Boundary cross-over for the middle leaf: allocate from brk. */
	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);

	p2m_init(p2m);

	p2m_top[topidx][mididx] = p2m;

	/* Keep the mfn shadow in sync for save/restore. */
	mid_mfn_p = p2m_top_mfn_p[topidx];
	WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
		"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
		topidx, mididx);
	mid_mfn_p[mididx] = virt_to_mfn(p2m);

	return true;
}
640
/*
 * Boot-time (brk) allocation of the mid level — and its mfn shadow —
 * covering @pfn, if they are still the shared "missing" placeholders.
 * Always returns true.
 */
static bool __init early_alloc_p2m(unsigned long pfn)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long *mid_mfn_p;
	unsigned long **mid;

	mid = p2m_top[topidx];
	mid_mfn_p = p2m_top_mfn_p[topidx];
	if (mid == p2m_mid_missing) {
		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);

		p2m_mid_init(mid);

		p2m_top[topidx] = mid;

		/* A real mid level cannot exist with a "missing" shadow
		 * only in the other direction; sanity-check the pairing. */
		BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
	}

	if (mid_mfn_p == p2m_mid_missing_mfn) {
		mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(mid_mfn_p);

		p2m_top_mfn_p[topidx] = mid_mfn_p;
		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
	}
	return true;
}
670
671
672
673
674
675
676
/*
 * Try to satisfy a boot-time leaf allocation for @set_pfn by recycling
 * an already-populated leaf page: scan the tree for a leaf whose
 * entries are ALL identity or ALL invalid, swap it for the matching
 * shared placeholder (p2m_identity / p2m_missing), and reuse the freed
 * page as the leaf for @set_pfn.
 *
 * Only applies when @set_pfn sits exactly on a leaf boundary.
 * Returns true if a page was recycled and installed.
 * NOTE(review): @set_mfn is currently unused in this body.
 */
bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
{
	unsigned topidx;
	unsigned mididx;
	unsigned ident_pfns;
	unsigned inv_pfns;
	unsigned long *p2m;
	unsigned long *mid_mfn_p;
	unsigned idx;
	unsigned long pfn;

	/* Only whole-leaf ranges can be recycled. */
	if (p2m_index(set_pfn))
		return false;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);

		if (!p2m_top[topidx])
			continue;

		if (p2m_top[topidx] == p2m_mid_missing)
			continue;

		mididx = p2m_mid_index(pfn);
		p2m = p2m_top[topidx][mididx];
		if (!p2m)
			continue;

		/* Shared placeholder pages are never recycled. */
		if ((p2m == p2m_missing) || (p2m == p2m_identity))
			continue;

		if ((unsigned long)p2m == INVALID_P2M_ENTRY)
			continue;

		/* Count uniform content; bail on the first mixed entry. */
		ident_pfns = 0;
		inv_pfns = 0;
		for (idx = 0; idx < P2M_PER_PAGE; idx++) {
			if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
				ident_pfns++;
			else if (p2m[idx] == INVALID_P2M_ENTRY)
				inv_pfns++;
			else
				break;
		}
		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
			goto found;
	}
	return false;
found:
	/* Swap the uniform leaf for the matching shared placeholder. */
	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);

	mid_mfn_p = p2m_top_mfn_p[topidx];

	/* NOTE(review): the mfn shadow is pointed at p2m_missing in both
	 * cases (identity and missing) — confirm the save/restore path
	 * expects this for identity ranges. */
	mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);

	/* Now reuse the freed page as the leaf for set_pfn's range. */
	topidx = p2m_top_index(set_pfn);
	mididx = p2m_mid_index(set_pfn);

	/* The mid level should already exist; allocate it if not. */
	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
		early_alloc_p2m(set_pfn);

	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
		return false;

	p2m_init(p2m);
	p2m_top[topidx][mididx] = p2m;
	mid_mfn_p = p2m_top_mfn_p[topidx];
	mid_mfn_p[mididx] = virt_to_mfn(p2m);

	return true;
}
/*
 * Boot-time variant of set_phys_to_machine(): on a miss, grow the tree
 * from brk (recycling an existing uniform leaf when possible) and
 * retry.  Returns true on success.
 */
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!early_alloc_p2m(pfn))
			return false;

		if (early_can_reuse_p2m_middle(pfn, mfn))
			return __set_phys_to_machine(pfn, mfn);

		/* false: no boundary check — allocate even at idx 0. */
		if (!early_alloc_p2m_middle(pfn, false))
			return false;

		if (!__set_phys_to_machine(pfn, mfn))
			return false;
	}

	return true;
}
/*
 * Mark the pfn range [pfn_s, pfn_e) as 1:1 identity mapped in the p2m.
 *
 * Ensures mid levels exist for the whole span, allocates real leaf
 * pages only at the (possibly partial) edges, and sets each entry via
 * __set_phys_to_machine() — which flips interior "missing" leaves to
 * the shared identity page without allocating.
 *
 * Returns the number of pfns actually marked (0 on invalid input).
 */
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
				      unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
		return 0;

	/* Auto-translated guests are effectively identity mapped. */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	/* Make sure a mid level exists for every covered top slot. */
	for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
		pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
	{
		WARN_ON(!early_alloc_p2m(pfn));
	}

	/* Edge leaves may be partial and need real pages (the boundary
	 * check makes these no-ops when the edge is leaf aligned). */
	early_alloc_p2m_middle(pfn_s, true);
	early_alloc_p2m_middle(pfn_e, true);

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
			break;

	if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
		"Identity mapping failed. We are %ld short of 1-1 mappings!\n",
		(pfn_e - pfn_s) - (pfn - pfn_s)))
		printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);

	return pfn - pfn_s;
}
807
808
/*
 * Set the p2m entry for @pfn without allocating.
 *
 * Returns false when the leaf for @pfn is the shared "missing" page and
 * a non-INVALID value was requested — the caller must then allocate
 * (alloc_p2m() / early_alloc_p2m*) and retry.  Returns true otherwise.
 */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	/* Auto-translated guests: the hypervisor does the translation. */
	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return true;

	/* Beyond the tree only INVALID may be "stored" (as a no-op). */
	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/*
	 * Identity-marked mfns can be recorded without allocating: if
	 * the leaf is already the shared identity page we are done, and
	 * a shared "missing" leaf can be flipped to it with a cmpxchg.
	 */
	if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
		if (p2m_top[topidx][mididx] == p2m_identity)
			return true;

		/* Swap over from MISSING to IDENTITY if possible. */
		if (p2m_top[topidx][mididx] == p2m_missing) {
			WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
				p2m_identity) != p2m_missing);
			return true;
		}
	}

	/* The shared "missing" leaf is read-only; only storing INVALID
	 * (its existing content) succeeds trivially. */
	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}
848
849bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
850{
851 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
852 if (!alloc_p2m(pfn))
853 return false;
854
855 if (!__set_phys_to_machine(pfn, mfn))
856 return false;
857 }
858
859 return true;
860}
861
/* Hash table of local pages that override the m2p for foreign mfns. */
#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH (1 << M2P_OVERRIDE_HASH_SHIFT)

static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
static DEFINE_SPINLOCK(m2p_override_lock); /* protects all hash buckets */
867
/* Boot-time allocation and initialisation of the override hash buckets. */
static void __init m2p_override_init(void)
{
	unsigned i;

	m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
				   sizeof(unsigned long));

	for (i = 0; i < M2P_OVERRIDE_HASH; i++)
		INIT_LIST_HEAD(&m2p_overrides[i]);
}
878
/* Bucket index in m2p_overrides[] for @mfn. */
static unsigned long mfn_hash(unsigned long mfn)
{
	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}
883
/*
 * Complete a batch of grant mappings: for each successfully mapped
 * grant, record the foreign mfn in the p2m (FOREIGN_FRAME) and, when
 * kernel mappings (@kmap_ops) are involved, register an m2p override.
 *
 * Returns 0 on success or a negative errno; entries processed before a
 * failure remain recorded.
 */
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/* Batch the mmu updates unless we are already inside a lazy
	 * region (or in interrupt context, where we must not enter one). */
	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;

		/* Skip entries whose grant map failed. */
		if (map_ops[i].status)
			continue;

		/* Recover the mapped mfn either from the installed pte
		 * or from the returned bus address. */
		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN_ON(PagePrivate(pages[i]));
		SetPagePrivate(pages[i]);
		set_page_private(pages[i], mfn);
		/* Stash the original mfn so it can be restored on unmap
		 * (see clear_foreign_p2m_mapping()). */
		pages[i]->index = pfn_to_mfn(pfn);

		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
			ret = -ENOMEM;
			goto out;
		}

		if (kmap_ops) {
			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
			if (ret)
				goto out;
		}
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
942
943
/*
 * Record that @page (grant-mapped to foreign @mfn) overrides the m2p
 * for that mfn, hashing it for m2p_find_override(), and — when
 * @kmap_op is non-NULL and the page is lowmem — install the kernel
 * grant mapping via a multicall.
 *
 * Returns 0 on success, -EINVAL if a lowmem page is not 4K-mapped.
 */
int m2p_add_override(unsigned long mfn, struct page *page,
		struct gnttab_map_grant_ref *kmap_op)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);
	if (!PageHighMem(page)) {
		/* Lowmem pages must be mapped with 4K ptes so the grant
		 * mapping can be installed in place. */
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);
		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
					"m2p_add_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs =
				xen_mc_entry(sizeof(*kmap_op));

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_map_grant_ref, kmap_op, 1);

			xen_mc_issue(PARAVIRT_LAZY_MMU);
		}
	}
	spin_lock_irqsave(&m2p_override_lock, flags);
	list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	/*
	 * If the mfn's canonical pfn still translates back to this mfn
	 * (p2m(m2p(mfn)) == mfn), tag that p2m slot with FOREIGN_FRAME —
	 * presumably so m2p lookups know to consult the override table;
	 * m2p_remove_override() undoes this when the last override for
	 * the mfn goes away.  (NOTE(review): exact consumer of the
	 * FOREIGN marker is outside this file — confirm.)
	 */
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (get_phys_to_machine(pfn) == mfn)
		set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));

	return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
998
/*
 * Tear down a batch of grant mappings created by
 * set_foreign_p2m_mapping(): restore each page's original mfn (stashed
 * in page->index) and remove any m2p override.
 *
 * Returns 0 on success, or a negative errno on the first failure
 * (remaining entries are left untouched).
 */
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/* Batch the mmu updates unless we are already inside a lazy
	 * region (or in interrupt context, where we must not enter one). */
	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		/* Only entries we previously marked FOREIGN are valid. */
		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		/* Restore the original mfn saved at map time. */
		set_phys_to_machine(pfn, pages[i]->index);

		if (kmap_ops)
			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
		if (ret)
			goto out;
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
1042
/*
 * Undo m2p_add_override(): unhash @page, tear down the kernel grant
 * mapping (replacing it with a mapping of the balloon scratch page),
 * and restore the canonical m2p relationship for @mfn if no other
 * override remains.
 *
 * Returns 0 on success, -EINVAL if a lowmem page is not 4K-mapped,
 * -1 if the original grant map hypercall had failed.
 */
int m2p_remove_override(struct page *page,
			struct gnttab_map_grant_ref *kmap_op,
			unsigned long mfn)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
					"m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_and_replace *unmap_op;
			struct page *scratch_page = get_balloon_scratch_page();
			unsigned long scratch_page_address = (unsigned long)
				__va(page_to_pfn(scratch_page) << PAGE_SHIFT);

			/*
			 * The map hypercall may still be queued in a
			 * multicall batch, in which case the handle has
			 * not been written back yet (-1): flush the
			 * batch so the handle below is valid.
			 */
			if (kmap_op->handle == -1)
				xen_mc_flush();

			/*
			 * After the flush an error status means the map
			 * hypercall itself failed — nothing to unmap.
			 */
			if (kmap_op->handle == GNTST_general_error) {
				printk(KERN_WARNING "m2p_remove_override: "
						"pfn %lx mfn %lx, failed to modify kernel mappings",
						pfn, mfn);
				put_balloon_scratch_page();
				return -1;
			}

			xen_mc_batch();

			/*
			 * Atomically replace the grant mapping with a
			 * mapping of the balloon scratch page, then
			 * remap the scratch page RO at its own address —
			 * presumably so no kernel virtual address is
			 * ever left unmapped (NOTE(review): confirm
			 * intent against the balloon driver).
			 */
			mcs = __xen_mc_entry(
					sizeof(struct gnttab_unmap_and_replace));
			unmap_op = mcs.args;
			unmap_op->host_addr = kmap_op->host_addr;
			unmap_op->new_addr = scratch_page_address;
			unmap_op->handle = kmap_op->handle;

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_unmap_and_replace, unmap_op, 1);

			mcs = __xen_mc_entry(0);
			MULTI_update_va_mapping(mcs.mc, scratch_page_address,
					pfn_pte(page_to_pfn(scratch_page),
					PAGE_KERNEL_RO), 0);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			kmap_op->host_addr = 0;
			put_balloon_scratch_page();
		}
	}

	/*
	 * If this was the last override for the mfn and its canonical
	 * pfn still carries the FOREIGN marker set by
	 * m2p_add_override(), restore the plain p2m entry.
	 */
	mfn &= ~FOREIGN_FRAME_BIT;
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}
EXPORT_SYMBOL_GPL(m2p_remove_override);
1140
1141struct page *m2p_find_override(unsigned long mfn)
1142{
1143 unsigned long flags;
1144 struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
1145 struct page *p, *ret;
1146
1147 ret = NULL;
1148
1149 spin_lock_irqsave(&m2p_override_lock, flags);
1150
1151 list_for_each_entry(p, bucket, lru) {
1152 if (page_private(p) == mfn) {
1153 ret = p;
1154 break;
1155 }
1156 }
1157
1158 spin_unlock_irqrestore(&m2p_override_lock, flags);
1159
1160 return ret;
1161}
1162
/*
 * Return the pfn of the page overriding @mfn, or @pfn unchanged when
 * no override exists.
 */
unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
	struct page *page = m2p_find_override(mfn);

	return page ? page_to_pfn(page) : pfn;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);
1174
1175#ifdef CONFIG_XEN_DEBUG_FS
1176#include <linux/debugfs.h>
1177#include "debugfs.h"
1178static int p2m_dump_show(struct seq_file *m, void *v)
1179{
1180 static const char * const level_name[] = { "top", "middle",
1181 "entry", "abnormal", "error"};
1182#define TYPE_IDENTITY 0
1183#define TYPE_MISSING 1
1184#define TYPE_PFN 2
1185#define TYPE_UNKNOWN 3
1186 static const char * const type_name[] = {
1187 [TYPE_IDENTITY] = "identity",
1188 [TYPE_MISSING] = "missing",
1189 [TYPE_PFN] = "pfn",
1190 [TYPE_UNKNOWN] = "abnormal"};
1191 unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
1192 unsigned int uninitialized_var(prev_level);
1193 unsigned int uninitialized_var(prev_type);
1194
1195 if (!p2m_top)
1196 return 0;
1197
1198 for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
1199 unsigned topidx = p2m_top_index(pfn);
1200 unsigned mididx = p2m_mid_index(pfn);
1201 unsigned idx = p2m_index(pfn);
1202 unsigned lvl, type;
1203
1204 lvl = 4;
1205 type = TYPE_UNKNOWN;
1206 if (p2m_top[topidx] == p2m_mid_missing) {
1207 lvl = 0; type = TYPE_MISSING;
1208 } else if (p2m_top[topidx] == NULL) {
1209 lvl = 0; type = TYPE_UNKNOWN;
1210 } else if (p2m_top[topidx][mididx] == NULL) {
1211 lvl = 1; type = TYPE_UNKNOWN;
1212 } else if (p2m_top[topidx][mididx] == p2m_identity) {
1213 lvl = 1; type = TYPE_IDENTITY;
1214 } else if (p2m_top[topidx][mididx] == p2m_missing) {
1215 lvl = 1; type = TYPE_MISSING;
1216 } else if (p2m_top[topidx][mididx][idx] == 0) {
1217 lvl = 2; type = TYPE_UNKNOWN;
1218 } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
1219 lvl = 2; type = TYPE_IDENTITY;
1220 } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
1221 lvl = 2; type = TYPE_MISSING;
1222 } else if (p2m_top[topidx][mididx][idx] == pfn) {
1223 lvl = 2; type = TYPE_PFN;
1224 } else if (p2m_top[topidx][mididx][idx] != pfn) {
1225 lvl = 2; type = TYPE_PFN;
1226 }
1227 if (pfn == 0) {
1228 prev_level = lvl;
1229 prev_type = type;
1230 }
1231 if (pfn == MAX_DOMAIN_PAGES-1) {
1232 lvl = 3;
1233 type = TYPE_UNKNOWN;
1234 }
1235 if (prev_type != type) {
1236 seq_printf(m, " [0x%lx->0x%lx] %s\n",
1237 prev_pfn_type, pfn, type_name[prev_type]);
1238 prev_pfn_type = pfn;
1239 prev_type = type;
1240 }
1241 if (prev_level != lvl) {
1242 seq_printf(m, " [0x%lx->0x%lx] level %s\n",
1243 prev_pfn_level, pfn, level_name[prev_level]);
1244 prev_pfn_level = pfn;
1245 prev_level = lvl;
1246 }
1247 }
1248 return 0;
1249#undef TYPE_IDENTITY
1250#undef TYPE_MISSING
1251#undef TYPE_PFN
1252#undef TYPE_UNKNOWN
1253}
1254
/* debugfs open hook: single-shot seq_file wrapping p2m_dump_show(). */
static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}
1259
/* Standard seq_file wiring for the debugfs p2m dump. */
static const struct file_operations p2m_dump_fops = {
	.open = p2m_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1266
static struct dentry *d_mmu_debug;

/* Create <xen-debugfs>/mmu/p2m exposing the p2m dump. */
static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
1282#endif
1283