linux/arch/x86/xen/p2m.c
/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The p2m table is logically a flat array, but we implement it as a
 * three-level tree to allow the address space to be sparse.
 *
 *                               Xen
 *                                |
 *     p2m_top              p2m_top_mfn
 *       /  \                   /   \
 * p2m_mid p2m_mid      p2m_mid_mfn p2m_mid_mfn
 *    / \      / \         /           /
 *  p2m p2m p2m p2m p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
 * maximum representable pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 *
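 * As a worked example, on 64-bit (512 entries at each level, 4K pages)
 * the tree can map at most 512 * 512 * 512 = 134217728 pages, i.e. 512GB
 * of pseudo-physical address space.
 *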
 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
 *
 * However, not all entries are filled with MFNs. For any leaf, middle, or
 * top-level entry that is void we assume the mapping is "missing". So (for
 * example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions, so
 * that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that for non-RAM regions (think PCI BARs, or
 * ACPI spaces) we can create the mappings easily, because the PFN value
 * matches the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity and
 * allocate (via reserve_brk) any other pages we need to cover the sides
 * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
 * INVALID_P2M_ENTRY type (the Xen toolstack only recognizes that and MFNs,
 * no other fancy value).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN.  If the PFN has IDENTITY_FRAME_BIT set we unmask that in
 * the appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 * non-identity pfn. To protect ourselves against that, we set (and get) the
 * IDENTITY_FRAME_BIT on all identity-mapped PFNs.
 *
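 * A lookup thus behaves roughly like this sketch (the real logic lives in
 * get_phys_to_machine() below and in the pfn_to_mfn() helper):
 *
 *  mfn = get_phys_to_machine(pfn);
 *  if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT))
 *          mfn &= ~IDENTITY_FRAME_BIT;    (the result then equals pfn)
 *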
 * This simplistic diagram is used to explain the more subtle piece of code.
 * There is also a diagram of the P2M at the end that can help.
 * Imagine your E820 looking like so:
 *
 *                    1GB                                           2GB
 * /-------------------+---------\/----\         /----------\    /---+-----\
 * | System RAM        | Sys RAM ||ACPI|         | reserved |    | Sys RAM |
 * \-------------------+---------/\----/         \----------/    \---+-----/
 *                               ^- 1029MB                       ^- 2001MB
 *
 * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
 *  2048MB = 524288 (0x80000)]
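 * [Worked conversion: with 4K pages one MB is 256 pfns, so the values
 *  above are simply 1029 * 256 = 263424, 2001 * 256 = 512256 and
 *  2048 * 256 = 524288.]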
 *
 * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
 * is actually not present (would have to kick the balloon driver to put it in).
 *
 * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
 * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
 * PFN and the end PFN (263424 and 512256 respectively). The first step is to
 * reserve_brk a top leaf page if p2m[1] is missing. The top leaf page covers
 * 512^2 of page estate (1GB) and in case the start or end PFN is not aligned
 * on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn to end
 * pfn.  We reserve_brk top leaf pages if they are missing (which means they
 * point to p2m_mid_missing).
 *
 * With the E820 example above, 263424 is not 1GB aligned so we allocate a
 * reserve_brk page which will cover the PFN estate from 0x40000 to 0x80000.
 * Each entry in the allocated page is "missing" (points to p2m_missing).
 *
 * The next stage is to determine if we need to do a more granular boundary
 * check on the 4MB (or 2MB depending on architecture) boundaries of the start
 * and end PFNs. We check if the start pfn and end pfn violate that boundary
 * check, and if so reserve_brk a middle (p2m[x][y]) leaf page. This way we
 * have a much finer granularity of setting which PFNs are missing and which
 * ones are identity. In our example 263424 and 512256 both fail the check so
 * we reserve_brk two pages. We populate them with INVALID_P2M_ENTRY (so they
 * both have "missing" values) and assign them to p2m[1][2] and p2m[1][488]
 * respectively.
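 *
 * (A quick check of the arithmetic, using the 64-bit leaf size of 512
 *  pfns, i.e. 2MB: 263424 % 512 == 256 and 512256 % 512 == 256, so both
 *  the start and the end pfn indeed violate the boundary.)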
 *
 * At this point we would at minimum reserve_brk one page, but could be up to
 * three. Each call to set_phys_range_identity has at maximum a three page
 * cost. If we were to query the P2M at this stage, all those entries from
 * start PFN through end PFN (so 1029MB -> 2001MB) would return
 * INVALID_P2M_ENTRY ("missing").
 *
 * The next step is to walk from the start pfn to the end pfn setting
 * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
 * If we find that the middle leaf is pointing to p2m_missing we can swap it
 * over to p2m_identity - this way covering 4MB (or 2MB) of PFN space.  At this
 * point we do not need to worry about boundary alignment (so no need to
 * reserve_brk a middle page, or figure out which PFNs are "missing" and which
 * ones are identity), as that has been done earlier.  If we find that the
 * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
 * that page (which covers 512 PFNs) and set the appropriate PFN with
 * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
 * set p2m[1][2][256->511] and p2m[1][488][0->255] with
 * IDENTITY_FRAME_BIT set.
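 *
 * In code, the whole walk-through above is the work of a single call,
 * roughly:
 *
 *  set_phys_range_identity(263424, 512256);
 *
 * which on success returns 512256 - 263424 = 248832, the number of PFNs
 * marked identity.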
 *
 * All other regions that are void (or not filled) either point to p2m_missing
 * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
 * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][256->511]
 * contain the INVALID_P2M_ENTRY value and are considered "missing."
 *
 * This is what the p2m ends up looking like (for the E820 above) with this
 * fabulous drawing:
 *
 *    p2m         /--------------\
 *  /-----\       | &mfn_list[0],|                           /-----------------\
 *  |  0  |------>| &mfn_list[1],|    /---------------\      | ~0, ~0, ..      |
 *  |-----|       |  ..., ~0, ~0 |    | ~0, ~0, [x]---+----->| IDENTITY [@256] |
 *  |  1  |---\   \--------------/    | [p2m_identity]+\     | IDENTITY [@257] |
 *  |-----|    \                      | [p2m_identity]+\\    | ....            |
 *  |  2  |--\  \-------------------->|  ...          | \\   \----------------/
 *  |-----|   \                       \---------------/  \\
 *  |  3  |\   \                                          \\  p2m_identity
 *  |-----| \   \-------------------->/---------------\   /-----------------\
 *  | ..  +->+                        | [p2m_identity]+-->| ~0, ~0, ~0, ... |
 *  \-----/ /                         | [p2m_identity]+-->| ..., ~0         |
 *         / /---------------\        | ....          |   \-----------------/
 *        /  | IDENTITY[@0]  |      /-+-[x], ~0, ~0.. |
 *       /   | IDENTITY[@256]|<----/  \---------------/
 *      /    | ~0, ~0, ....  |
 *     |     \---------------/
 *     |
 *     p2m_missing             p2m_missing
 * /------------------\     /------------\
 * | [p2m_mid_missing]+---->| ~0, ~0, ~0 |
 * | [p2m_mid_missing]+---->| ..., ~0    |
 * \------------------/     \------------/
 *
 * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_FRAME_BIT).
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/cache.h>
#include <asm/setup.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/grant_table.h>

#include "multicalls.h"
#include "xen-ops.h"

static void __init m2p_override_init(void);

unsigned long xen_max_p2m_pfn __read_mostly;

#define P2M_PER_PAGE            (PAGE_SIZE / sizeof(unsigned long))
#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN             (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

/* Placeholders for holes in the address space */
static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);

static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);

RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));

/* We might hit two boundary violations, one at the start and one at the
 * end; at most, each boundary violation will require three middle pages. */
RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);

static inline unsigned p2m_top_index(unsigned long pfn)
{
        BUG_ON(pfn >= MAX_P2M_PFN);
        return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
        return pfn % P2M_PER_PAGE;
}
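
/*
 * Worked example (64-bit, so P2M_PER_PAGE == 512): the start pfn 263424
 * (0x40500) from the walk-through in the header comment decomposes as
 *
 *      p2m_top_index(263424) == 263424 / (512 * 512) == 1
 *      p2m_mid_index(263424) == (263424 / 512) % 512 == 2
 *      p2m_index(263424)     == 263424 % 512         == 256
 *
 * which is why that walk-through ends up touching p2m[1][2][256].
 */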

static void p2m_top_init(unsigned long ***top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing;
}

static void p2m_top_mfn_init(unsigned long *top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_init(unsigned long **mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = p2m_missing;
}

static void p2m_mid_mfn_init(unsigned long *mid)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = virt_to_mfn(p2m_missing);
}

static void p2m_init(unsigned long *p2m)
{
        unsigned i;

        /* A leaf page holds P2M_PER_PAGE entries; P2M_MID_PER_PAGE happens
         * to be numerically identical, but this is the right constant. */
        for (i = 0; i < P2M_PER_PAGE; i++)
                p2m[i] = INVALID_P2M_ENTRY;
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called very early, and must use extend_brk()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
        unsigned long pfn;

        /* Pre-initialize p2m_top_mfn to be completely missing */
        if (p2m_top_mfn == NULL) {
                p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_mid_mfn_init(p2m_mid_missing_mfn);

                p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_p_init(p2m_top_mfn_p);

                p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
                p2m_top_mfn_init(p2m_top_mfn);
        } else {
                /* Reinitialise: the MFNs all change after migration */
                p2m_mid_mfn_init(p2m_mid_missing_mfn);
        }

        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);
                unsigned long **mid;
                unsigned long *mid_mfn_p;

                mid = p2m_top[topidx];
                mid_mfn_p = p2m_top_mfn_p[topidx];

                /* Don't bother allocating any mfn mid levels if
                 * they're just missing; just update the stored mfn,
                 * since all of them could have changed over a migrate.
                 */
                if (mid == p2m_mid_missing) {
                        BUG_ON(mididx);
                        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
                        p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
                        pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
                        continue;
                }

                if (mid_mfn_p == p2m_mid_missing_mfn) {
                        /*
                         * XXX boot-time only!  We should never find
                         * missing parts of the mfn tree after
                         * runtime.  extend_brk() will BUG if we call
                         * it too late.
                         */
                        mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_mfn_init(mid_mfn_p);

                        p2m_top_mfn_p[topidx] = mid_mfn_p;
                }

                p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
                mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
        }
}

void xen_setup_mfn_list_list(void)
{
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn);
        HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned long pfn;

        xen_max_p2m_pfn = max_pfn;

        p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_init(p2m_missing);

        p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_mid_init(p2m_mid_missing);

        p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_top_init(p2m_top);

        p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_init(p2m_identity);

        /*
         * The domain builder gives us a pre-constructed p2m array in
         * mfn_list for all the pages initially given to us, so we just
         * need to graft that into our tree structure.
         */
        for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);

                if (p2m_top[topidx] == p2m_mid_missing) {
                        unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_init(mid);

                        p2m_top[topidx] = mid;
                }

                /*
                 * As long as the mfn_list has enough entries to completely
                 * fill a p2m page, pointing into the array is ok. But if
                 * not, the entries beyond the last pfn will be undefined.
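                 * (For example, hypothetically, with max_pfn == 1000 the
                 * final iteration starts at pfn 512, and entries 488..511
                 * of that chunk are then explicitly set to
                 * INVALID_P2M_ENTRY by the loop below.)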
                 */
                if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
                        unsigned long p2midx;

                        p2midx = max_pfn % P2M_PER_PAGE;
                        for ( ; p2midx < P2M_PER_PAGE; p2midx++)
                                mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
                }
                p2m_top[topidx][mididx] = &mfn_list[pfn];
        }

        m2p_override_init();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(pfn >= MAX_P2M_PFN))
                return INVALID_P2M_ENTRY;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        /*
         * p2m_identity is filled with INVALID_P2M_ENTRY just like
         * p2m_missing, so dereferencing it and returning INVALID_P2M_ENTRY
         * would be wrong; check for it explicitly.
         */
        if (p2m_top[topidx][mididx] == p2m_identity)
                return IDENTITY_FRAME(pfn);

        return p2m_top[topidx][mididx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
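
/*
 * Tying this back to the E820 walk-through in the header comment: after
 * set_phys_range_identity(263424, 512256) has run, we would expect
 * (illustrative values, assuming the 64-bit layout):
 *
 *      get_phys_to_machine(0x40500) == IDENTITY_FRAME(0x40500)
 *      get_phys_to_machine(0x40000) == INVALID_P2M_ENTRY   ("missing")
 */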

static void *alloc_p2m_page(void)
{
        return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}

static void free_p2m_page(void *p)
{
        free_page((unsigned long)p);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
        unsigned topidx, mididx;
        unsigned long ***top_p, **mid;
        unsigned long *top_mfn_p, *mid_mfn;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);

        top_p = &p2m_top[topidx];
        mid = *top_p;

        if (mid == p2m_mid_missing) {
                /* Mid level is missing, allocate a new one */
                mid = alloc_p2m_page();
                if (!mid)
                        return false;

                p2m_mid_init(mid);

                if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) {
                        /* Lost the race: free ours, use the winner's page */
                        free_p2m_page(mid);
                        mid = *top_p;
                }
        }

        top_mfn_p = &p2m_top_mfn[topidx];
        mid_mfn = p2m_top_mfn_p[topidx];

        BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

        if (mid_mfn == p2m_mid_missing_mfn) {
                /* Separately check the mid mfn level */
                unsigned long missing_mfn;
                unsigned long mid_mfn_mfn;

                mid_mfn = alloc_p2m_page();
                if (!mid_mfn)
                        return false;

                p2m_mid_mfn_init(mid_mfn);

                missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
                mid_mfn_mfn = virt_to_mfn(mid_mfn);
                if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) {
                        /* Lost the race: free ours, use the winner's page */
                        free_p2m_page(mid_mfn);
                        mid_mfn = p2m_top_mfn_p[topidx];
                } else {
                        p2m_top_mfn_p[topidx] = mid_mfn;
                }
        }

        if (p2m_top[topidx][mididx] == p2m_identity ||
            p2m_top[topidx][mididx] == p2m_missing) {
                /* p2m leaf page is missing */
                unsigned long *p2m;
                unsigned long *p2m_orig = p2m_top[topidx][mididx];

                p2m = alloc_p2m_page();
                if (!p2m)
                        return false;

                p2m_init(p2m);

                if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
                        free_p2m_page(p2m);
                else
                        mid_mfn[mididx] = virt_to_mfn(p2m);
        }

        return true;
}

static bool __init __early_alloc_p2m(unsigned long pfn)
{
        unsigned topidx, mididx, idx;
        unsigned long *p2m;
        unsigned long *mid_mfn_p;

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        /* Pfff.. No boundary cross-over, let's get out. */
        if (!idx)
                return false;

        WARN(p2m_top[topidx][mididx] == p2m_identity,
                "P2M[%u][%u] == IDENTITY, should be MISSING (or alloced)!\n",
                topidx, mididx);

        /*
         * Could already have been done by xen_build_dynamic_phys_to_machine..
         */
        if (p2m_top[topidx][mididx] != p2m_missing)
                return false;

        /* Boundary cross-over at one of the edges: allocate a real leaf. */
        p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
        p2m_init(p2m);

        p2m_top[topidx][mididx] = p2m;

        /* For save/restore we need the MFN of the P2M saved */
        mid_mfn_p = p2m_top_mfn_p[topidx];
        WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
                "P2M_TOP_P[%u][%u] != MFN of p2m_missing!\n",
                topidx, mididx);
        mid_mfn_p[mididx] = virt_to_mfn(p2m);

        return true;
}

unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                      unsigned long pfn_e)
{
        unsigned long pfn;

        if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
                return 0;

        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
                return pfn_e - pfn_s;

        if (pfn_s > pfn_e)
                return 0;

        for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
                pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
                pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
        {
                unsigned topidx = p2m_top_index(pfn);
                unsigned long *mid_mfn_p;
                unsigned long **mid;

                mid = p2m_top[topidx];
                mid_mfn_p = p2m_top_mfn_p[topidx];
                if (mid == p2m_mid_missing) {
                        mid = extend_brk(PAGE_SIZE, PAGE_SIZE);

                        p2m_mid_init(mid);

                        p2m_top[topidx] = mid;

                        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
                }
                /* And the save/restore P2M tables.. */
                if (mid_mfn_p == p2m_mid_missing_mfn) {
                        mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
                        p2m_mid_mfn_init(mid_mfn_p);

                        p2m_top_mfn_p[topidx] = mid_mfn_p;
                        p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
                        /* Note: we don't set mid_mfn_p[mididx] here;
                         * see __early_alloc_p2m */
                }
        }

        __early_alloc_p2m(pfn_s);
        __early_alloc_p2m(pfn_e);

        for (pfn = pfn_s; pfn < pfn_e; pfn++)
                if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
                        break;

        if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
                "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
                (pfn_e - pfn_s) - (pfn - pfn_s)))
                printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);

        return pfn - pfn_s;
}

/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        unsigned topidx, mididx, idx;

        if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
                BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
                return true;
        }
        if (unlikely(pfn >= MAX_P2M_PFN)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }

        topidx = p2m_top_index(pfn);
        mididx = p2m_mid_index(pfn);
        idx = p2m_index(pfn);

        /* For sparse holes, where the p2m leaf has real PFNs along with
         * PCI holes, stick the PFN in as the MFN value.
         */
        if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
                if (p2m_top[topidx][mididx] == p2m_identity)
                        return true;

                /* Swap over from MISSING to IDENTITY if needed. */
                if (p2m_top[topidx][mididx] == p2m_missing) {
                        WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
                                p2m_identity) != p2m_missing);
                        return true;
                }
        }

        if (p2m_top[topidx][mididx] == p2m_missing)
                return mfn == INVALID_P2M_ENTRY;

        p2m_top[topidx][mididx][idx] = mfn;

        return true;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
                if (!alloc_p2m(pfn))
                        return false;

                if (!__set_phys_to_machine(pfn, mfn))
                        return false;
        }

        return true;
}

#define M2P_OVERRIDE_HASH_SHIFT 10
#define M2P_OVERRIDE_HASH       (1 << M2P_OVERRIDE_HASH_SHIFT)

static RESERVE_BRK_ARRAY(struct list_head, m2p_overrides, M2P_OVERRIDE_HASH);
static DEFINE_SPINLOCK(m2p_override_lock);

static void __init m2p_override_init(void)
{
        unsigned i;

        m2p_overrides = extend_brk(sizeof(*m2p_overrides) * M2P_OVERRIDE_HASH,
                                   sizeof(unsigned long));

        for (i = 0; i < M2P_OVERRIDE_HASH; i++)
                INIT_LIST_HEAD(&m2p_overrides[i]);
}

static unsigned long mfn_hash(unsigned long mfn)
{
        return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
}

/* Add an MFN override for a particular page */
int m2p_add_override(unsigned long mfn, struct page *page,
                struct gnttab_map_grant_ref *kmap_op)
{
        unsigned long flags;
        unsigned long pfn;
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;

        pfn = page_to_pfn(page);
        if (!PageHighMem(page)) {
                address = (unsigned long)__va(pfn << PAGE_SHIFT);
                ptep = lookup_address(address, &level);
                if (WARN(ptep == NULL || level != PG_LEVEL_4K,
                                        "m2p_add_override: pfn %lx not mapped\n", pfn))
                        return -EINVAL;
        }
        WARN_ON(PagePrivate(page));
        SetPagePrivate(page);
        set_page_private(page, mfn);
        page->index = pfn_to_mfn(pfn);

        if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
                return -ENOMEM;

        if (kmap_op != NULL) {
                if (!PageHighMem(page)) {
                        struct multicall_space mcs =
                                xen_mc_entry(sizeof(*kmap_op));

                        MULTI_grant_table_op(mcs.mc,
                                        GNTTABOP_map_grant_ref, kmap_op, 1);

                        xen_mc_issue(PARAVIRT_LAZY_MMU);
                }
                /* let's use dev_bus_addr to record the old mfn instead */
                kmap_op->dev_bus_addr = page->index;
                page->index = (unsigned long) kmap_op;
        }
        spin_lock_irqsave(&m2p_override_lock, flags);
        list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
        spin_unlock_irqrestore(&m2p_override_lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
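
/*
 * A minimal usage sketch (hypothetical caller; the real users are the
 * grant table code and backend drivers):
 *
 *      rc = m2p_add_override(mfn, page, NULL);
 *      ... the p2m now records FOREIGN_FRAME(mfn) for the page's pfn,
 *          and m2p_find_override(mfn) returns the page ...
 *      rc = m2p_remove_override(page, false);
 */
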
int m2p_remove_override(struct page *page, bool clear_pte)
{
        unsigned long flags;
        unsigned long mfn;
        unsigned long pfn;
        unsigned long uninitialized_var(address);
        unsigned level;
        pte_t *ptep = NULL;

        pfn = page_to_pfn(page);
        mfn = get_phys_to_machine(pfn);
        if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
                return -EINVAL;

        if (!PageHighMem(page)) {
                address = (unsigned long)__va(pfn << PAGE_SHIFT);
                ptep = lookup_address(address, &level);

                if (WARN(ptep == NULL || level != PG_LEVEL_4K,
                                        "m2p_remove_override: pfn %lx not mapped\n", pfn))
                        return -EINVAL;
        }

        spin_lock_irqsave(&m2p_override_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&m2p_override_lock, flags);
        WARN_ON(!PagePrivate(page));
        ClearPagePrivate(page);

        if (clear_pte) {
                struct gnttab_map_grant_ref *map_op =
                        (struct gnttab_map_grant_ref *) page->index;
                set_phys_to_machine(pfn, map_op->dev_bus_addr);
                if (!PageHighMem(page)) {
                        struct multicall_space mcs;
                        struct gnttab_unmap_grant_ref *unmap_op;

                        /*
                         * It might be that we queued all the m2p grant table
                         * hypercalls in a multicall, and m2p_remove_override
                         * gets called before the multicall has actually been
                         * issued. In this case the handle is going to be -1,
                         * because it hasn't been modified yet.
                         */
                        if (map_op->handle == -1)
                                xen_mc_flush();
                        /*
                         * Now if map_op->handle is negative it means that the
                         * hypercall actually returned an error.
                         */
                        if (map_op->handle == GNTST_general_error) {
                                printk(KERN_WARNING "m2p_remove_override: "
                                                "pfn %lx mfn %lx, failed to modify kernel mappings\n",
                                                pfn, mfn);
                                return -1;
                        }

                        mcs = xen_mc_entry(
                                        sizeof(struct gnttab_unmap_grant_ref));
                        unmap_op = mcs.args;
                        unmap_op->host_addr = map_op->host_addr;
                        unmap_op->handle = map_op->handle;
                        unmap_op->dev_bus_addr = 0;

                        MULTI_grant_table_op(mcs.mc,
                                        GNTTABOP_unmap_grant_ref, unmap_op, 1);

                        xen_mc_issue(PARAVIRT_LAZY_MMU);

                        set_pte_at(&init_mm, address, ptep,
                                        pfn_pte(pfn, PAGE_KERNEL));
                        __flush_tlb_single(address);
                        map_op->host_addr = 0;
                }
        } else
                set_phys_to_machine(pfn, page->index);

        return 0;
}
EXPORT_SYMBOL_GPL(m2p_remove_override);

struct page *m2p_find_override(unsigned long mfn)
{
        unsigned long flags;
        struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
        struct page *p, *ret;

        ret = NULL;

        spin_lock_irqsave(&m2p_override_lock, flags);

        list_for_each_entry(p, bucket, lru) {
                if (page_private(p) == mfn) {
                        ret = p;
                        break;
                }
        }

        spin_unlock_irqrestore(&m2p_override_lock, flags);

        return ret;
}

unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn)
{
        struct page *p = m2p_find_override(mfn);
        unsigned long ret = pfn;

        if (p)
                ret = page_to_pfn(p);

        return ret;
}
EXPORT_SYMBOL_GPL(m2p_find_override_pfn);

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
#include "debugfs.h"
static int p2m_dump_show(struct seq_file *m, void *v)
{
        static const char * const level_name[] = { "top", "middle",
                                                "entry", "abnormal", "error"};
#define TYPE_IDENTITY 0
#define TYPE_MISSING 1
#define TYPE_PFN 2
#define TYPE_UNKNOWN 3
        static const char * const type_name[] = {
                                [TYPE_IDENTITY] = "identity",
                                [TYPE_MISSING] = "missing",
                                [TYPE_PFN] = "pfn",
                                [TYPE_UNKNOWN] = "abnormal"};
        unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
        unsigned int uninitialized_var(prev_level);
        unsigned int uninitialized_var(prev_type);

        if (!p2m_top)
                return 0;

        for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
                unsigned topidx = p2m_top_index(pfn);
                unsigned mididx = p2m_mid_index(pfn);
                unsigned idx = p2m_index(pfn);
                unsigned lvl, type;

                lvl = 4;
                type = TYPE_UNKNOWN;
                if (p2m_top[topidx] == p2m_mid_missing) {
                        lvl = 0; type = TYPE_MISSING;
                } else if (p2m_top[topidx] == NULL) {
                        lvl = 0; type = TYPE_UNKNOWN;
                } else if (p2m_top[topidx][mididx] == NULL) {
                        lvl = 1; type = TYPE_UNKNOWN;
                } else if (p2m_top[topidx][mididx] == p2m_identity) {
                        lvl = 1; type = TYPE_IDENTITY;
                } else if (p2m_top[topidx][mididx] == p2m_missing) {
                        lvl = 1; type = TYPE_MISSING;
                } else if (p2m_top[topidx][mididx][idx] == 0) {
                        lvl = 2; type = TYPE_UNKNOWN;
                } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
                        lvl = 2; type = TYPE_IDENTITY;
                } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
                        lvl = 2; type = TYPE_MISSING;
                } else {
                        /* Any other value is a plain MFN entry, whether or
                         * not it happens to equal the pfn. */
                        lvl = 2; type = TYPE_PFN;
                }
                if (pfn == 0) {
                        prev_level = lvl;
                        prev_type = type;
                }
                if (pfn == MAX_DOMAIN_PAGES-1) {
                        lvl = 3;
                        type = TYPE_UNKNOWN;
                }
                if (prev_type != type) {
                        seq_printf(m, " [0x%lx->0x%lx] %s\n",
                                prev_pfn_type, pfn, type_name[prev_type]);
                        prev_pfn_type = pfn;
                        prev_type = type;
                }
                if (prev_level != lvl) {
                        seq_printf(m, " [0x%lx->0x%lx] level %s\n",
                                prev_pfn_level, pfn, level_name[prev_level]);
                        prev_pfn_level = pfn;
                        prev_level = lvl;
                }
        }
        return 0;
#undef TYPE_IDENTITY
#undef TYPE_MISSING
#undef TYPE_PFN
#undef TYPE_UNKNOWN
}

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
        .open           = p2m_dump_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
        struct dentry *d_xen = xen_init_debugfs();

        if (d_xen == NULL)
                return -ENOMEM;

        d_mmu_debug = debugfs_create_dir("mmu", d_xen);

        debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
        return 0;
}
fs_initcall(xen_p2m_debugfs);
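
/*
 * With CONFIG_XEN_DEBUG_FS enabled the ranges can be inspected from
 * userspace; for the E820 walk-through in the header comment the output
 * would contain lines along these (illustrative) lines:
 *
 *      # cat /sys/kernel/debug/xen/mmu/p2m
 *       [0x40500->0x7d100] identity
 *       [0x7d100->0x80000] missing
 */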
#endif /* CONFIG_XEN_DEBUG_FS */