linux/arch/x86/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access 32-bit, 64-bit and EPT guest ptes, so the
 * code in this file is compiled once per guest pte type.
 */
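
/*
 * Illustrative sketch (added; not part of this file): mmu.c instantiates
 * this template roughly as follows, selecting the guest pte flavour via
 * PTTYPE before each inclusion.  Exact placement in mmu.c may differ.
 *
 *	#define PTTYPE PTTYPE_EPT
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */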

/*
 * This is used to catch non-optimized-out uses of the
 * PT_GUEST_(DIRTY|ACCESS)_SHIFT macros for the EPT paging type, which has
 * no A/D bits.
 */
extern u64 __pure __using_nonexistent_pte_bit(void)
               __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");

#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
        #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
        #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
        #define CMPXCHG cmpxchg
#elif PTTYPE == PTTYPE_EPT
        #define pt_element_t u64
        #define guest_walker guest_walkerEPT
        #define FNAME(name) ept_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #define PT_GUEST_ACCESSED_MASK 0
        #define PT_GUEST_DIRTY_MASK 0
        #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
        #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 4
#else
        #error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        unsigned max_level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        bool pte_writable[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};
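
/*
 * Note (added for clarity): the per-level arrays above are indexed by
 * walker level minus one, i.e. slot [level - 1] holds the entry for that
 * paging level, with level 1 being the 4K pte level and walker->max_level
 * the root.  For example, on a 4-level walk table_gfn[3] is the gfn of the
 * root table and ptes[0] is the leaf gpte.
 */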

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
{
        unsigned mask;

        /* dirty bit is not supported, so no need to track it */
        if (!PT_GUEST_DIRTY_MASK)
                return;

        BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

        mask = (unsigned)~ACC_WRITE_MASK;
        /* Allow write access to dirty gptes */
        mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
                PT_WRITABLE_MASK;
        *access &= mask;
}
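
/*
 * Worked example (added; assumes the non-EPT layout where the dirty bit is
 * bit 6 and the writable bit is bit 1): a clean gpte has bit 6 clear, so
 * (gpte >> (6 - 1)) contributes 0 at the PT_WRITABLE_MASK position and the
 * resulting mask strips ACC_WRITE_MASK from *access; once the guest sets
 * the dirty bit, the shift lands a 1 on PT_WRITABLE_MASK and write access
 * is preserved.  The BUILD_BUG_ON above relies on ACC_WRITE_MASK being the
 * same bit as PT_WRITABLE_MASK.
 */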

static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
{
        int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f;

        return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) |
                ((mmu->bad_mt_xwr & (1ull << low6)) != 0);
}

static inline int FNAME(is_present_gpte)(unsigned long pte)
{
#if PTTYPE != PTTYPE_EPT
        return is_present_gpte(pte);
#else
        return pte & 7;
#endif
}
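
/*
 * Note (added): for the EPT case "present" means any of the read, write or
 * execute permission bits (bits 2:0) is set, hence the "pte & 7" test above;
 * there is no dedicated present bit in an EPT entry.
 */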

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
{
        int npages;
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
        /* Check if the user is doing something meaningless. */
        if (unlikely(npages != 1))
                return -EFAULT;

        table = kmap_atomic(page);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}
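
/*
 * Note (added): the return convention above is 0 when the gpte was updated
 * atomically, a negative errno when the user page could not be pinned, and
 * a nonzero "changed" indication when the gpte was modified under us;
 * FNAME(walk_addr_generic) restarts the walk in the latter case.  CMPXCHG
 * expands to cmpxchg or cmpxchg64 depending on the PTTYPE/host combination
 * selected above.
 */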

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp, u64 *spte,
                                  u64 gpte)
{
        if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
                goto no_present;

        if (!FNAME(is_present_gpte)(gpte))
                goto no_present;

        /* if the accessed bit is not supported, prefetch non-accessed gptes */
        if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
                goto no_present;

        return false;

no_present:
        drop_spte(vcpu->kvm, spte);
        return true;
}

static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
{
        unsigned access;
#if PTTYPE == PTTYPE_EPT
        access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
                ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
                ACC_USER_MASK;
#else
        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
        access &= ~(gpte >> PT64_NX_SHIFT);
#endif

        return access;
}
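
/*
 * Worked example (added; non-EPT case, assuming ACC_EXEC_MASK == 1,
 * ACC_WRITE_MASK == PT_WRITABLE_MASK (bit 1) and ACC_USER_MASK ==
 * PT_USER_MASK (bit 2)): the W and U bits are taken straight from the gpte,
 * execute is assumed allowed, and the NX bit (bit 63) is shifted down to
 * bit 0 so that "access &= ~(gpte >> PT64_NX_SHIFT)" clears ACC_EXEC_MASK
 * exactly when NX is set.
 */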

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
                                             int write_fault)
{
        unsigned level, index;
        pt_element_t pte, orig_pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        int ret;

        /* dirty/accessed bits are not supported, so no need to update them */
        if (!PT_GUEST_DIRTY_MASK)
                return 0;

        for (level = walker->max_level; level >= walker->level; --level) {
                pte = orig_pte = walker->ptes[level - 1];
                table_gfn = walker->table_gfn[level - 1];
                ptep_user = walker->ptep_user[level - 1];
                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
                if (!(pte & PT_GUEST_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_GUEST_ACCESSED_MASK;
                }
                if (level == walker->level && write_fault &&
                                !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_GUEST_DIRTY_MASK;
                }
                if (pte == orig_pte)
                        continue;

                /*
                 * If the slot is read-only, simply do not process the accessed
                 * and dirty bits.  This is the correct thing to do if the slot
                 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
                 * are only supported if the accessed and dirty bits are already
                 * set in the ROM (so that MMIO writes are never needed).
                 *
                 * Note that NPT does not allow this at all and faults, since
                 * it always wants nested page table entries for the guest
                 * page tables to be writable.  And EPT works but will simply
                 * overwrite the read-only memory to set the accessed and dirty
                 * bits.
                 */
                if (unlikely(!walker->pte_writable[level - 1]))
                        continue;

                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
                if (ret)
                        return ret;

                mark_page_dirty(vcpu->kvm, table_gfn);
                /* keep the cached copy of the gpte we just modified in sync */
                walker->ptes[level - 1] = pte;
        }
        return 0;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gva_t addr, u32 access)
{
        int ret;
        pt_element_t pte;
        pt_element_t __user *uninitialized_var(ptep_user);
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access, accessed_dirty;
        gpa_t pte_gpa;
        int offset;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
        gpa_t real_gpa;
        gfn_t gfn;

        trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
        walker->level = mmu->root_level;
        pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!FNAME(is_present_gpte)(pte))
                        goto error;
                --walker->level;
        }
#endif
        walker->max_level = walker->level;
        ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));

        accessed_dirty = PT_GUEST_ACCESSED_MASK;
        pt_access = pte_access = ACC_ALL;
        ++walker->level;

        do {
                gfn_t real_gfn;
                unsigned long host_addr;

                pt_access &= pte_access;
                --walker->level;

                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              PFERR_USER_MASK|PFERR_WRITE_MASK,
                                              &walker->fault);

                /*
                 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
                 * instruction) triggers a nested page fault.  The exit
                 * qualification / exit info field will incorrectly have
                 * "guest page access" as the nested page fault's cause,
                 * instead of "guest page structure access".  To fix this,
                 * the x86_exception struct should be augmented with enough
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
                if (unlikely(real_gfn == UNMAPPED_GVA))
                        return 0;

                real_gfn = gpa_to_gfn(real_gfn);

                host_addr = gfn_to_hva_prot(vcpu->kvm, real_gfn,
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;

                trace_kvm_mmu_paging_element(pte, walker->level);

                if (unlikely(!FNAME(is_present_gpte)(pte)))
                        goto error;

                if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
                                                     walker->level))) {
                        errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }

                accessed_dirty &= pte;
                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;
        } while (!is_last_gpte(mmu, walker->level, pte));

        if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
                errcode |= PFERR_PRESENT_MASK;
                goto error;
        }

        gfn = gpte_to_gfn_lvl(pte, walker->level);
        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

        if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
                gfn += pse36_gfn_delta(pte);

        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
        if (real_gpa == UNMAPPED_GVA)
                return 0;

        walker->gfn = real_gpa >> PAGE_SHIFT;

        if (!write_fault)
                FNAME(protect_clean_gpte)(&pte_access, pte);
        else
                /*
                 * On a write fault, fold the dirty bit into accessed_dirty.
                 * For modes without A/D bit support, accessed_dirty is
                 * always clear.
                 */
                accessed_dirty &= pte >>
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
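        /*
         * Worked example (added; non-EPT case, where the accessed bit is
         * bit 5 and the dirty bit is bit 6): the shift above is by 1, so
         * the leaf gpte's dirty bit lands exactly on PT_GUEST_ACCESSED_MASK.
         * accessed_dirty therefore stays nonzero only if every gpte on the
         * walk had A set and, on a write fault, the leaf also had D set;
         * otherwise the slow path below sets the missing bits via
         * FNAME(update_accessed_dirty_bits) and may retry the walk.
         */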

        if (unlikely(!accessed_dirty)) {
                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
                        goto retry_walk;
        }

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);
        return 1;

error:
        errcode |= write_fault | user_fault;
        if (fetch_fault && (mmu->nx ||
                            kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;

#if PTTYPE == PTTYPE_EPT
        /*
         * Use PFERR_RSVD_MASK in error_code to tell whether an EPT
         * misconfiguration needs to be injected. The detection is
         * done by is_rsvd_bits_set() above.
         *
         * We set up the value of exit_qualification to inject:
         * [2:0] - Derived from [2:0] of the real exit_qualification at the EPT violation
         * [5:3] - Calculated by the page walk of the guest EPT page tables
         * [8:7] - Derived from [8:7] of the real exit_qualification
         *
         * The other bits are set to 0.
         */
        if (!(errcode & PFERR_RSVD_MASK)) {
                vcpu->arch.exit_qualification &= 0x187;
                vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
        }
#endif
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
                                        access);
}

#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
                                   struct kvm_vcpu *vcpu, gva_t addr,
                                   u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
                                        addr, access);
}
#endif

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
        unsigned pte_access;
        gfn_t gfn;
        pfn_t pfn;

        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return false;

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
        FNAME(protect_clean_gpte)(&pte_access, gpte);
        pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                        no_dirty_log && (pte_access & ACC_WRITE_MASK));
        if (is_error_pfn(pfn))
                return false;

        /*
         * we call mmu_set_spte() with host_writable = true because
         * pte_prefetch_gfn_to_pfn always gets a writable pfn.
         */
        mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
                     gfn, pfn, true, true);

        return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte = *(const pt_element_t *)pte;

        FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PT_PAGE_TABLE_LEVEL) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = page_header(__pa(sptep));

        if (sp->role.level > PT_PAGE_TABLE_LEVEL)
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;
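        /*
         * Note (added): the start index is rounded down to a
         * PTE_PREFETCH_NUM-aligned boundary so that the group of sptes
         * scanned below corresponds to the aligned group of guest ptes that
         * FNAME(gpte_changed) cached in gw->prefetch_ptes, i.e. gptep[i] is
         * the guest pte backing the i-th spte of the group.
         */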

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (spte == sptep)
                        continue;

                if (is_shadow_present_pte(*spte))
                        continue;

                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
                        break;
        }
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int write_fault, int hlevel,
                         pfn_t pfn, bool map_writable, bool prefault)
{
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned direct_access, access = gw->pt_access;
        int top_level, emulate = 0;

        direct_access = gw->pte_access;

        top_level = vcpu->arch.mmu.root_level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there.  Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                goto out_gpte_changed;

        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
                gfn_t table_gfn;

                clear_sp_write_flooding_count(it.sptep);
                drop_large_spte(vcpu, it.sptep);

                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access, it.sptep);
                }

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                if (sp)
                        link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
        }

        for (;
             shadow_walk_okay(&it) && it.level > hlevel;
             shadow_walk_next(&it)) {
                gfn_t direct_gfn;

                clear_sp_write_flooding_count(it.sptep);
                validate_direct_spte(vcpu, it.sptep, direct_access);

                drop_large_spte(vcpu, it.sptep);

                if (is_shadow_present_pte(*it.sptep))
                        continue;

                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
                                      true, direct_access, it.sptep);
                link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
        }

        clear_sp_write_flooding_count(it.sptep);
        mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
                     it.level, gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);

        return emulate;

out_gpte_changed:
        if (sp)
                kvm_mmu_put_page(sp, it.sptep);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/*
 * Check whether the mapped gfn can write its own page table through the
 * current mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is itself in use as a page table, we should
 * force kvm to map it with a small page size, because the new shadow page
 * created when kvm builds the shadow page table will stop kvm from using a
 * large page anyway.  Doing this early avoids unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently being used as one of its own page tables.
 *
 * Note: the PDPT page table is not checked for PAE-32 bit guests.  This is
 * fine since the PDPT is always shadowed; that means we can never use a
 * large page to map the gfn that is used as the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
                              struct guest_walker *walker, int user_fault,
                              bool *write_fault_to_shadow_pgtable)
{
        int level;
        gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
        bool self_changed = false;

        if (!(walker->pte_access & ACC_WRITE_MASK ||
              (!is_write_protection(vcpu) && !user_fault)))
                return false;

        for (level = walker->level; level <= walker->max_level; level++) {
                gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

                self_changed |= !(gfn & mask);
                *write_fault_to_shadow_pgtable |= !gfn;
        }

        return self_changed;
}
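
/*
 * Worked example (added): for a guest 2MB mapping KVM_PAGES_PER_HPAGE(2) is
 * 512, so the mask above clears the low 9 bits of a gfn.  The xor of the
 * faulting gfn with a table gfn is zero in those low bits exactly when the
 * two fall inside the same large-page region (self_changed), and zero
 * everywhere when the faulting gfn *is* that page table
 * (write_fault_to_shadow_pgtable).
 */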

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                             bool prefault)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        int r;
        pfn_t pfn;
        int level = PT_PAGE_TABLE_LEVEL;
        int force_pt_level;
        unsigned long mmu_seq;
        bool map_writable, is_self_change_mapping;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

        if (unlikely(error_code & PFERR_RSVD_MASK)) {
                r = handle_mmio_page_fault(vcpu, addr, error_code,
                                              mmu_is_nested(vcpu));
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;

                /*
                 * A page fault with PFEC.RSVD = 1 was caused by a shadow
                 * (MMIO) page fault; the RSVD bit must not be used when
                 * walking the guest page tables.
                 */
                error_code &= ~PFERR_RSVD_MASK;
        }

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                if (!prefault)
                        inject_page_fault(vcpu, &walker.fault);

                return 0;
        }

        vcpu->arch.write_fault_to_shadow_pgtable = false;

        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
              &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

        if (walker.level >= PT_DIRECTORY_LEVEL)
                force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
                   || is_self_change_mapping;
        else
                force_pt_level = 1;
        if (!force_pt_level) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
        }

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();

        if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
                         &map_writable))
                return 0;

        if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
                                walker.gfn, pfn, walker.pte_access, &r))
                return r;

        /*
         * Do not change pte_access if the pfn is an mmio page; otherwise
         * we would cache the incorrect access into the mmio spte.
         */
        if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
             !is_write_protection(vcpu) && !user_fault &&
              !is_noslot_pfn(pfn)) {
                walker.pte_access |= ACC_WRITE_MASK;
                walker.pte_access &= ~ACC_USER_MASK;

                /*
                 * If we converted a user page to a kernel page so that
                 * the kernel can write to it when cr0.wp=0, we should
                 * also prevent the kernel from executing it if SMEP is
                 * enabled.
                 */
                if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
                        walker.pte_access &= ~ACC_EXEC_MASK;
        }

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;

        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        make_mmu_pages_available(vcpu);
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
                         level, pfn, map_writable, prefault);
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return r;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
        int offset = 0;

        WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
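
/*
 * Note (added): for 32-bit guests a level-1 shadow page covers only half of
 * the 1024-entry guest page table (1 << PT64_LEVEL_BITS = 512 entries), so
 * role.quadrant selects which half, and the offset above converts that into
 * an entry offset within the guest page before it is scaled to bytes.
 */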

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        int level;
        u64 *sptep;

        vcpu_clear_mmio_info(vcpu, gva);

        /*
         * No need to check the return value here; rmap_can_add() will
         * let us skip the pte prefetch later.
         */
        mmu_topup_memory_caches(vcpu);

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
                WARN_ON(1);
                return;
        }

        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
                        pt_element_t gpte;
                        gpa_t pte_gpa;

                        if (!sp->unsync)
                                break;

                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
                                kvm_flush_remote_tlbs(vcpu->kvm);

                        if (!rmap_can_add(vcpu))
                                break;

                        if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                                  sizeof(pt_element_t)))
                                break;

                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
                                      u32 access,
                                      struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}
#endif

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all TLBs if a spte is dropped even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may decide the page is no longer
 *   used by the guest and skip the TLB flush, letting the guest access freed
 *   pages.
 *   We increase kvm->tlbs_dirty to delay the TLB flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;

        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);

        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn;

                if (!sp->spt[i])
                        continue;

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                gfn = gpte_to_gfn(gpte);
                pte_access = sp->role.access;
                pte_access &= FNAME(gpte_access)(vcpu, gpte);
                FNAME(protect_clean_gpte)(&pte_access, gpte);

                if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
                      &nr_present))
                        continue;

                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                nr_present++;

                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

                set_spte(vcpu, &sp->spt[i], pte_access,
                         PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
                         host_writable);
        }

        return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
#undef PT_GUEST_ACCESSED_MASK
#undef PT_GUEST_DIRTY_MASK
#undef PT_GUEST_DIRTY_SHIFT
#undef PT_GUEST_ACCESSED_SHIFT
