linux/arch/x86/kvm/paging_tmpl.h
   1/*
   2 * Kernel-based Virtual Machine driver for Linux
   3 *
   4 * This module enables machines with Intel VT-x extensions to run virtual
   5 * machines without emulation or binary translation.
   6 *
   7 * MMU support
   8 *
   9 * Copyright (C) 2006 Qumranet, Inc.
  10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  11 *
  12 * Authors:
  13 *   Yaniv Kamay  <yaniv@qumranet.com>
  14 *   Avi Kivity   <avi@qumranet.com>
  15 *
  16 * This work is licensed under the terms of the GNU GPL, version 2.  See
  17 * the COPYING file in the top-level directory.
  18 *
  19 */
  20
  21/*
  22 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
  23 * so the code in this file is compiled twice, once per pte size.
  24 */
  25
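     /*
      * For reference, mmu.c is expected to instantiate this template roughly
      * as follows (a sketch, not copied verbatim from mmu.c):
      *
      *    #define PTTYPE PTTYPE_EPT
      *    #include "paging_tmpl.h"
      *    #undef PTTYPE
      *
      *    #define PTTYPE 64
      *    #include "paging_tmpl.h"
      *    #undef PTTYPE
      *
      *    #define PTTYPE 32
      *    #include "paging_tmpl.h"
      *    #undef PTTYPE
      */
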
   26/*
   27 * This is used to catch non-optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
   28 * uses for the EPT paging type, which has no A/D bits.
   29 */
  30extern u64 __pure __using_nonexistent_pte_bit(void)
  31               __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
  32
  33#if PTTYPE == 64
  34        #define pt_element_t u64
  35        #define guest_walker guest_walker64
  36        #define FNAME(name) paging##64_##name
  37        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
  38        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
  39        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
  40        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
  41        #define PT_LEVEL_BITS PT64_LEVEL_BITS
  42        #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
  43        #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
  44        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
  45        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
  46        #ifdef CONFIG_X86_64
  47        #define PT_MAX_FULL_LEVELS 4
  48        #define CMPXCHG cmpxchg
  49        #else
  50        #define CMPXCHG cmpxchg64
  51        #define PT_MAX_FULL_LEVELS 2
  52        #endif
  53#elif PTTYPE == 32
  54        #define pt_element_t u32
  55        #define guest_walker guest_walker32
  56        #define FNAME(name) paging##32_##name
  57        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
  58        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
  59        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
  60        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
  61        #define PT_LEVEL_BITS PT32_LEVEL_BITS
  62        #define PT_MAX_FULL_LEVELS 2
  63        #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
  64        #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
  65        #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
  66        #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
  67        #define CMPXCHG cmpxchg
  68#elif PTTYPE == PTTYPE_EPT
  69        #define pt_element_t u64
  70        #define guest_walker guest_walkerEPT
  71        #define FNAME(name) ept_##name
  72        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
  73        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
  74        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
  75        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
  76        #define PT_LEVEL_BITS PT64_LEVEL_BITS
  77        #define PT_GUEST_ACCESSED_MASK 0
  78        #define PT_GUEST_DIRTY_MASK 0
  79        #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
  80        #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
  81        #define CMPXCHG cmpxchg64
  82        #define PT_MAX_FULL_LEVELS 4
  83#else
  84        #error Invalid PTTYPE value
  85#endif
  86
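     /*
      * FNAME() mangles every definition below with the pte flavour: e.g.
      * FNAME(walk_addr) expands to paging64_walk_addr, paging32_walk_addr or
      * ept_walk_addr depending on PTTYPE, so the three instantiations can
      * coexist in the same object file.
      */
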
  87#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
  88#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
  89
  90/*
  91 * The guest_walker structure emulates the behavior of the hardware page
  92 * table walker.
  93 */
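     /*
      * The per-level arrays below are indexed by guest level minus one and
      * are filled in by FNAME(walk_addr_generic) from max_level down to the
      * final walker->level.
      */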
  94struct guest_walker {
  95        int level;
  96        unsigned max_level;
  97        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
  98        pt_element_t ptes[PT_MAX_FULL_LEVELS];
  99        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
 100        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
 101        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
 102        bool pte_writable[PT_MAX_FULL_LEVELS];
 103        unsigned pt_access;
 104        unsigned pte_access;
 105        gfn_t gfn;
 106        struct x86_exception fault;
 107};
 108
 109static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 110{
 111        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
 112}
 113
 114static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
 115{
 116        unsigned mask;
 117
 118        /* dirty bit is not supported, so no need to track it */
 119        if (!PT_GUEST_DIRTY_MASK)
 120                return;
 121
 122        BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
 123
 124        mask = (unsigned)~ACC_WRITE_MASK;
 125        /* Allow write access to dirty gptes */
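             /*
              * PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT moves the guest dirty
              * bit down into the writable bit position, so a clean gpte strips
              * ACC_WRITE_MASK from *access while a dirty one keeps it.
              */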
 126        mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
 127                PT_WRITABLE_MASK;
 128        *access &= mask;
 129}
 130
 131static inline int FNAME(is_present_gpte)(unsigned long pte)
 132{
 133#if PTTYPE != PTTYPE_EPT
 134        return is_present_gpte(pte);
 135#else
 136        return pte & 7;
 137#endif
 138}
 139
 140static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 141                               pt_element_t __user *ptep_user, unsigned index,
 142                               pt_element_t orig_pte, pt_element_t new_pte)
 143{
 144        int npages;
 145        pt_element_t ret;
 146        pt_element_t *table;
 147        struct page *page;
 148
 149        npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
 150        /* Check if the user is doing something meaningless. */
 151        if (unlikely(npages != 1))
 152                return -EFAULT;
 153
 154        table = kmap_atomic(page);
 155        ret = CMPXCHG(&table[index], orig_pte, new_pte);
 156        kunmap_atomic(table);
 157
 158        kvm_release_page_dirty(page);
 159
 160        return (ret != orig_pte);
 161}
 162
 163static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
 164                                  struct kvm_mmu_page *sp, u64 *spte,
 165                                  u64 gpte)
 166{
 167        if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
 168                goto no_present;
 169
 170        if (!FNAME(is_present_gpte)(gpte))
 171                goto no_present;
 172
  173        /* if the accessed bit is not supported, prefetch even non-accessed gptes */
 174        if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
 175                goto no_present;
 176
 177        return false;
 178
 179no_present:
 180        drop_spte(vcpu->kvm, spte);
 181        return true;
 182}
 183
 184static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
 185{
 186        unsigned access;
 187#if PTTYPE == PTTYPE_EPT
 188        access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
 189                ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
 190                ACC_USER_MASK;
 191#else
 192        BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
 193        BUILD_BUG_ON(ACC_EXEC_MASK != 1);
 194        access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
 195        /* Combine NX with P (which is set here) to get ACC_EXEC_MASK.  */
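             /*
              * PT64_NX_SHIFT is bit 63, so the shift lands NX on bit 0, where
              * ACC_EXEC_MASK lives: a present non-NX gpte keeps exec permission
              * (1 ^ 0) and an NX gpte loses it (1 ^ 1).
              */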
 196        access ^= (gpte >> PT64_NX_SHIFT);
 197#endif
 198
 199        return access;
 200}
 201
 202static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
 203                                             struct kvm_mmu *mmu,
 204                                             struct guest_walker *walker,
 205                                             int write_fault)
 206{
 207        unsigned level, index;
 208        pt_element_t pte, orig_pte;
 209        pt_element_t __user *ptep_user;
 210        gfn_t table_gfn;
 211        int ret;
 212
 213        /* dirty/accessed bits are not supported, so no need to update them */
 214        if (!PT_GUEST_DIRTY_MASK)
 215                return 0;
 216
 217        for (level = walker->max_level; level >= walker->level; --level) {
 218                pte = orig_pte = walker->ptes[level - 1];
 219                table_gfn = walker->table_gfn[level - 1];
 220                ptep_user = walker->ptep_user[level - 1];
 221                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
 222                if (!(pte & PT_GUEST_ACCESSED_MASK)) {
 223                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
 224                        pte |= PT_GUEST_ACCESSED_MASK;
 225                }
 226                if (level == walker->level && write_fault &&
 227                                !(pte & PT_GUEST_DIRTY_MASK)) {
 228                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 229                        pte |= PT_GUEST_DIRTY_MASK;
 230                }
 231                if (pte == orig_pte)
 232                        continue;
 233
 234                /*
 235                 * If the slot is read-only, simply do not process the accessed
 236                 * and dirty bits.  This is the correct thing to do if the slot
 237                 * is ROM, and page tables in read-as-ROM/write-as-MMIO slots
 238                 * are only supported if the accessed and dirty bits are already
 239                 * set in the ROM (so that MMIO writes are never needed).
 240                 *
 241                 * Note that NPT does not allow this at all and faults, since
 242                 * it always wants nested page table entries for the guest
 243                 * page tables to be writable.  And EPT works but will simply
 244                 * overwrite the read-only memory to set the accessed and dirty
 245                 * bits.
 246                 */
 247                if (unlikely(!walker->pte_writable[level - 1]))
 248                        continue;
 249
 250                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
 251                if (ret)
 252                        return ret;
 253
 254                kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
 255                walker->ptes[level - 1] = pte;
 256        }
 257        return 0;
 258}
 259
 260static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
 261{
 262        unsigned pkeys = 0;
 263#if PTTYPE == 64
 264        pte_t pte = {.pte = gpte};
 265
 266        pkeys = pte_flags_pkey(pte_flags(pte));
 267#endif
 268        return pkeys;
 269}
 270
 271/*
 272 * Fetch a guest pte for a guest virtual address
 273 */
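     /*
      * The walk starts from mmu->get_cr3() and descends one level per loop
      * iteration, recording each table gfn, gpte gpa and user-space pointer
      * in @walker, ANDing the per-level access bits together, and stopping
      * at the last-level gpte or at the first not-present/reserved-bit gpte.
      */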
 274static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 275                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 276                                    gva_t addr, u32 access)
 277{
 278        int ret;
 279        pt_element_t pte;
 280        pt_element_t __user *uninitialized_var(ptep_user);
 281        gfn_t table_gfn;
 282        unsigned index, pt_access, pte_access, accessed_dirty, pte_pkey;
 283        gpa_t pte_gpa;
 284        int offset;
 285        const int write_fault = access & PFERR_WRITE_MASK;
 286        const int user_fault  = access & PFERR_USER_MASK;
 287        const int fetch_fault = access & PFERR_FETCH_MASK;
 288        u16 errcode = 0;
 289        gpa_t real_gpa;
 290        gfn_t gfn;
 291
 292        trace_kvm_mmu_pagetable_walk(addr, access);
 293retry_walk:
 294        walker->level = mmu->root_level;
 295        pte           = mmu->get_cr3(vcpu);
 296
 297#if PTTYPE == 64
 298        if (walker->level == PT32E_ROOT_LEVEL) {
 299                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
 300                trace_kvm_mmu_paging_element(pte, walker->level);
 301                if (!FNAME(is_present_gpte)(pte))
 302                        goto error;
 303                --walker->level;
 304        }
 305#endif
 306        walker->max_level = walker->level;
 307        ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
 308
 309        accessed_dirty = PT_GUEST_ACCESSED_MASK;
 310        pt_access = pte_access = ACC_ALL;
 311        ++walker->level;
 312
 313        do {
 314                gfn_t real_gfn;
 315                unsigned long host_addr;
 316
 317                pt_access &= pte_access;
 318                --walker->level;
 319
 320                index = PT_INDEX(addr, walker->level);
 321
 322                table_gfn = gpte_to_gfn(pte);
 323                offset    = index * sizeof(pt_element_t);
 324                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
 325                walker->table_gfn[walker->level - 1] = table_gfn;
 326                walker->pte_gpa[walker->level - 1] = pte_gpa;
 327
 328                real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
 329                                              PFERR_USER_MASK|PFERR_WRITE_MASK,
 330                                              &walker->fault);
 331
 332                /*
  333                 * FIXME: This can happen if emulation (e.g. of an INS/OUTS
 334                 * instruction) triggers a nested page fault.  The exit
 335                 * qualification / exit info field will incorrectly have
 336                 * "guest page access" as the nested page fault's cause,
 337                 * instead of "guest page structure access".  To fix this,
 338                 * the x86_exception struct should be augmented with enough
 339                 * information to fix the exit_qualification or exit_info_1
 340                 * fields.
 341                 */
 342                if (unlikely(real_gfn == UNMAPPED_GVA))
 343                        return 0;
 344
 345                real_gfn = gpa_to_gfn(real_gfn);
 346
 347                host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
 348                                            &walker->pte_writable[walker->level - 1]);
 349                if (unlikely(kvm_is_error_hva(host_addr)))
 350                        goto error;
 351
 352                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
 353                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
 354                        goto error;
 355                walker->ptep_user[walker->level - 1] = ptep_user;
 356
 357                trace_kvm_mmu_paging_element(pte, walker->level);
 358
 359                if (unlikely(!FNAME(is_present_gpte)(pte)))
 360                        goto error;
 361
 362                if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
 363                        errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 364                        goto error;
 365                }
 366
 367                accessed_dirty &= pte;
 368                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
 369
 370                walker->ptes[walker->level - 1] = pte;
 371        } while (!is_last_gpte(mmu, walker->level, pte));
 372
 373        pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
 374        errcode = permission_fault(vcpu, mmu, pte_access, pte_pkey, access);
 375        if (unlikely(errcode))
 376                goto error;
 377
 378        gfn = gpte_to_gfn_lvl(pte, walker->level);
 379        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
 380
 381        if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
 382                gfn += pse36_gfn_delta(pte);
 383
 384        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
 385        if (real_gpa == UNMAPPED_GVA)
 386                return 0;
 387
 388        walker->gfn = real_gpa >> PAGE_SHIFT;
 389
 390        if (!write_fault)
 391                FNAME(protect_clean_gpte)(&pte_access, pte);
 392        else
 393                /*
 394                 * On a write fault, fold the dirty bit into accessed_dirty.
  395                 * For modes without A/D bit support, accessed_dirty is
  396                 * always clear.
 397                 */
 398                accessed_dirty &= pte >>
 399                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
 400
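             /*
              * accessed_dirty started out as PT_GUEST_ACCESSED_MASK and was
              * ANDed with every gpte on the walk (and, on a write fault, with
              * the leaf dirty bit shifted into the accessed position), so it
              * is zero when some A/D bit still needs to be set.  For modes
              * without A/D bits it is always zero and
              * update_accessed_dirty_bits() returns immediately.
              */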
 401        if (unlikely(!accessed_dirty)) {
 402                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
 403                if (unlikely(ret < 0))
 404                        goto error;
 405                else if (ret)
 406                        goto retry_walk;
 407        }
 408
 409        walker->pt_access = pt_access;
 410        walker->pte_access = pte_access;
 411        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
 412                 __func__, (u64)pte, pte_access, pt_access);
 413        return 1;
 414
 415error:
 416        errcode |= write_fault | user_fault;
 417        if (fetch_fault && (mmu->nx ||
 418                            kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
 419                errcode |= PFERR_FETCH_MASK;
 420
 421        walker->fault.vector = PF_VECTOR;
 422        walker->fault.error_code_valid = true;
 423        walker->fault.error_code = errcode;
 424
 425#if PTTYPE == PTTYPE_EPT
 426        /*
  427         * Use PFERR_RSVD_MASK in error_code to tell if an EPT
  428         * misconfiguration needs to be injected. The detection is
 429         * done by is_rsvd_bits_set() above.
 430         *
 431         * We set up the value of exit_qualification to inject:
 432         * [2:0] - Derive from [2:0] of real exit_qualification at EPT violation
 433         * [5:3] - Calculated by the page walk of the guest EPT page tables
 434         * [7:8] - Derived from [7:8] of real exit_qualification
 435         *
 436         * The other bits are set to 0.
 437         */
 438        if (!(errcode & PFERR_RSVD_MASK)) {
 439                vcpu->arch.exit_qualification &= 0x187;
 440                vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
 441        }
 442#endif
 443        walker->fault.address = addr;
 444        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 445
 446        trace_kvm_mmu_walker_error(walker->fault.error_code);
 447        return 0;
 448}
 449
 450static int FNAME(walk_addr)(struct guest_walker *walker,
 451                            struct kvm_vcpu *vcpu, gva_t addr, u32 access)
 452{
 453        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
 454                                        access);
 455}
 456
 457#if PTTYPE != PTTYPE_EPT
 458static int FNAME(walk_addr_nested)(struct guest_walker *walker,
 459                                   struct kvm_vcpu *vcpu, gva_t addr,
 460                                   u32 access)
 461{
 462        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
 463                                        addr, access);
 464}
 465#endif
 466
 467static bool
 468FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 469                     u64 *spte, pt_element_t gpte, bool no_dirty_log)
 470{
 471        unsigned pte_access;
 472        gfn_t gfn;
 473        kvm_pfn_t pfn;
 474
 475        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 476                return false;
 477
 478        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 479
 480        gfn = gpte_to_gfn(gpte);
 481        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 482        FNAME(protect_clean_gpte)(&pte_access, gpte);
 483        pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 484                        no_dirty_log && (pte_access & ACC_WRITE_MASK));
 485        if (is_error_pfn(pfn))
 486                return false;
 487
 488        /*
 489         * we call mmu_set_spte() with host_writable = true because
 490         * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 491         */
 492        mmu_set_spte(vcpu, spte, pte_access, 0, PT_PAGE_TABLE_LEVEL, gfn, pfn,
 493                     true, true);
 494
 495        return true;
 496}
 497
 498static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 499                              u64 *spte, const void *pte)
 500{
 501        pt_element_t gpte = *(const pt_element_t *)pte;
 502
 503        FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
 504}
 505
 506static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
 507                                struct guest_walker *gw, int level)
 508{
 509        pt_element_t curr_pte;
 510        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
 511        u64 mask;
 512        int r, index;
 513
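             /*
              * At the last level, re-read a PTE_PREFETCH_NUM-aligned block of
              * gptes in one go so that FNAME(pte_prefetch) can reuse
              * gw->prefetch_ptes instead of touching guest memory again.
              */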
 514        if (level == PT_PAGE_TABLE_LEVEL) {
 515                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
 516                base_gpa = pte_gpa & ~mask;
 517                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);
 518
 519                r = kvm_vcpu_read_guest_atomic(vcpu, base_gpa,
 520                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
 521                curr_pte = gw->prefetch_ptes[index];
 522        } else
 523                r = kvm_vcpu_read_guest_atomic(vcpu, pte_gpa,
 524                                  &curr_pte, sizeof(curr_pte));
 525
 526        return r || curr_pte != gw->ptes[level - 1];
 527}
 528
 529static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 530                                u64 *sptep)
 531{
 532        struct kvm_mmu_page *sp;
 533        pt_element_t *gptep = gw->prefetch_ptes;
 534        u64 *spte;
 535        int i;
 536
 537        sp = page_header(__pa(sptep));
 538
 539        if (sp->role.level > PT_PAGE_TABLE_LEVEL)
 540                return;
 541
 542        if (sp->role.direct)
 543                return __direct_pte_prefetch(vcpu, sp, sptep);
 544
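             /* Align down to the start of the PTE_PREFETCH_NUM-spte window. */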
 545        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
 546        spte = sp->spt + i;
 547
 548        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
 549                if (spte == sptep)
 550                        continue;
 551
 552                if (is_shadow_present_pte(*spte))
 553                        continue;
 554
 555                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
 556                        break;
 557        }
 558}
 559
 560/*
 561 * Fetch a shadow pte for a specific level in the paging hierarchy.
 562 * If the guest tries to write a write-protected page, we need to
 563 * emulate this operation, return 1 to indicate this case.
 564 */
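     /*
      * The first loop below mirrors the guest's own page-table levels with
      * indirect shadow pages, re-checking each gpte after write protecting
      * its page; the second adds direct shadow pages below the guest's last
      * level when the final mapping level (hlevel) is smaller than it.
      */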
 565static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 566                         struct guest_walker *gw,
 567                         int write_fault, int hlevel,
 568                         kvm_pfn_t pfn, bool map_writable, bool prefault)
 569{
 570        struct kvm_mmu_page *sp = NULL;
 571        struct kvm_shadow_walk_iterator it;
 572        unsigned direct_access, access = gw->pt_access;
 573        int top_level, emulate;
 574
 575        direct_access = gw->pte_access;
 576
 577        top_level = vcpu->arch.mmu.root_level;
 578        if (top_level == PT32E_ROOT_LEVEL)
 579                top_level = PT32_ROOT_LEVEL;
 580        /*
 581         * Verify that the top-level gpte is still there.  Since the page
 582         * is a root page, it is either write protected (and cannot be
 583         * changed from now on) or it is invalid (in which case, we don't
 584         * really care if it changes underneath us after this point).
 585         */
 586        if (FNAME(gpte_changed)(vcpu, gw, top_level))
 587                goto out_gpte_changed;
 588
 589        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 590                goto out_gpte_changed;
 591
 592        for (shadow_walk_init(&it, vcpu, addr);
 593             shadow_walk_okay(&it) && it.level > gw->level;
 594             shadow_walk_next(&it)) {
 595                gfn_t table_gfn;
 596
 597                clear_sp_write_flooding_count(it.sptep);
 598                drop_large_spte(vcpu, it.sptep);
 599
 600                sp = NULL;
 601                if (!is_shadow_present_pte(*it.sptep)) {
 602                        table_gfn = gw->table_gfn[it.level - 2];
 603                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
 604                                              false, access);
 605                }
 606
 607                /*
 608                 * Verify that the gpte in the page we've just write
 609                 * protected is still there.
 610                 */
 611                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
 612                        goto out_gpte_changed;
 613
 614                if (sp)
 615                        link_shadow_page(vcpu, it.sptep, sp);
 616        }
 617
 618        for (;
 619             shadow_walk_okay(&it) && it.level > hlevel;
 620             shadow_walk_next(&it)) {
 621                gfn_t direct_gfn;
 622
 623                clear_sp_write_flooding_count(it.sptep);
 624                validate_direct_spte(vcpu, it.sptep, direct_access);
 625
 626                drop_large_spte(vcpu, it.sptep);
 627
 628                if (is_shadow_present_pte(*it.sptep))
 629                        continue;
 630
 631                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
 632
 633                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 634                                      true, direct_access);
 635                link_shadow_page(vcpu, it.sptep, sp);
 636        }
 637
 638        clear_sp_write_flooding_count(it.sptep);
 639        emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
 640                               it.level, gw->gfn, pfn, prefault, map_writable);
 641        FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 642
 643        return emulate;
 644
 645out_gpte_changed:
 646        kvm_release_pfn_clean(pfn);
 647        return 0;
 648}
 649
  650/*
  651 * Check whether the mapped gfn can write its own page tables through the
  652 * current mapping.
  653 *
  654 * This is a helper for FNAME(page_fault).  When the guest uses a large page
  655 * to map a writable gfn that is itself in use as a page table, force kvm to
  656 * map it with small pages: the shadow page created when kvm shadows that
  657 * page table would break the large mapping anyway, and doing this early
  658 * avoids unnecessary #PFs and emulation.
  659 *
  660 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
  661 * currently in use as one of its own page tables.
  662 *
  663 * Note: the PDPT is not checked for PAE 32-bit guests.  That is fine, since
  664 * the PDPT is always shadowed and so can never be mapped with a large page
  665 * anyway.
  666 */
 667static bool
 668FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 669                              struct guest_walker *walker, int user_fault,
 670                              bool *write_fault_to_shadow_pgtable)
 671{
 672        int level;
 673        gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
 674        bool self_changed = false;
 675
 676        if (!(walker->pte_access & ACC_WRITE_MASK ||
 677              (!is_write_protection(vcpu) && !user_fault)))
 678                return false;
 679
 680        for (level = walker->level; level <= walker->max_level; level++) {
 681                gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
 682
 683                self_changed |= !(gfn & mask);
 684                *write_fault_to_shadow_pgtable |= !gfn;
 685        }
 686
 687        return self_changed;
 688}
 689
 690/*
 691 * Page fault handler.  There are several causes for a page fault:
 692 *   - there is no shadow pte for the guest pte
 693 *   - write access through a shadow pte marked read only so that we can set
 694 *     the dirty bit
 695 *   - write access to a shadow pte marked read only so we can update the page
 696 *     dirty bitmap, when userspace requests it
 697 *   - mmio access; in this case we will never install a present shadow pte
 698 *   - normal guest page fault due to the guest pte marked not present, not
 699 *     writable, or not executable
 700 *
 701 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 702 *           a negative value on error.
 703 */
 704static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 705                             bool prefault)
 706{
 707        int write_fault = error_code & PFERR_WRITE_MASK;
 708        int user_fault = error_code & PFERR_USER_MASK;
 709        struct guest_walker walker;
 710        int r;
 711        kvm_pfn_t pfn;
 712        int level = PT_PAGE_TABLE_LEVEL;
 713        bool force_pt_level = false;
 714        unsigned long mmu_seq;
 715        bool map_writable, is_self_change_mapping;
 716
 717        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 718
 719        r = mmu_topup_memory_caches(vcpu);
 720        if (r)
 721                return r;
 722
 723        /*
 724         * If PFEC.RSVD is set, this is a shadow page fault.
 725         * The bit needs to be cleared before walking guest page tables.
 726         */
 727        error_code &= ~PFERR_RSVD_MASK;
 728
 729        /*
 730         * Look up the guest pte for the faulting address.
 731         */
 732        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
 733
 734        /*
 735         * The page is not mapped by the guest.  Let the guest handle it.
 736         */
 737        if (!r) {
 738                pgprintk("%s: guest page fault\n", __func__);
 739                if (!prefault)
 740                        inject_page_fault(vcpu, &walker.fault);
 741
 742                return 0;
 743        }
 744
 745        if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
 746                shadow_page_table_clear_flood(vcpu, addr);
 747                return 1;
 748        }
 749
 750        vcpu->arch.write_fault_to_shadow_pgtable = false;
 751
 752        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
 753              &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);
 754
 755        if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) {
 756                level = mapping_level(vcpu, walker.gfn, &force_pt_level);
 757                if (likely(!force_pt_level)) {
 758                        level = min(walker.level, level);
 759                        walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
 760                }
 761        } else
 762                force_pt_level = true;
 763
 764        mmu_seq = vcpu->kvm->mmu_notifier_seq;
 765        smp_rmb();
 766
 767        if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
 768                         &map_writable))
 769                return 0;
 770
 771        if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
 772                                walker.gfn, pfn, walker.pte_access, &r))
 773                return r;
 774
 775        /*
 776         * Do not change pte_access if the pfn is a mmio page, otherwise
 777         * we will cache the incorrect access into mmio spte.
 778         */
 779        if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
 780             !is_write_protection(vcpu) && !user_fault &&
 781              !is_noslot_pfn(pfn)) {
 782                walker.pte_access |= ACC_WRITE_MASK;
 783                walker.pte_access &= ~ACC_USER_MASK;
 784
 785                /*
 786                 * If we converted a user page to a kernel page,
 787                 * so that the kernel can write to it when cr0.wp=0,
 788                 * then we should prevent the kernel from executing it
 789                 * if SMEP is enabled.
 790                 */
 791                if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
 792                        walker.pte_access &= ~ACC_EXEC_MASK;
 793        }
 794
 795        spin_lock(&vcpu->kvm->mmu_lock);
 796        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
 797                goto out_unlock;
 798
 799        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 800        make_mmu_pages_available(vcpu);
 801        if (!force_pt_level)
 802                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
 803        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
 804                         level, pfn, map_writable, prefault);
 805        ++vcpu->stat.pf_fixed;
 806        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 807        spin_unlock(&vcpu->kvm->mmu_lock);
 808
 809        return r;
 810
 811out_unlock:
 812        spin_unlock(&vcpu->kvm->mmu_lock);
 813        kvm_release_pfn_clean(pfn);
 814        return 0;
 815}
 816
 817static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 818{
 819        int offset = 0;
 820
 821        WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
 822
 823        if (PTTYPE == 32)
 824                offset = sp->role.quadrant << PT64_LEVEL_BITS;
 825
 826        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 827}
 828
 829static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 830{
 831        struct kvm_shadow_walk_iterator iterator;
 832        struct kvm_mmu_page *sp;
 833        int level;
 834        u64 *sptep;
 835
 836        vcpu_clear_mmio_info(vcpu, gva);
 837
 838        /*
  839         * No need to check the return value here: if it fails,
  840         * rmap_can_add() lets us skip the pte prefetch later.
 841         */
 842        mmu_topup_memory_caches(vcpu);
 843
 844        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
 845                WARN_ON(1);
 846                return;
 847        }
 848
 849        spin_lock(&vcpu->kvm->mmu_lock);
 850        for_each_shadow_entry(vcpu, gva, iterator) {
 851                level = iterator.level;
 852                sptep = iterator.sptep;
 853
 854                sp = page_header(__pa(sptep));
 855                if (is_last_spte(*sptep, level)) {
 856                        pt_element_t gpte;
 857                        gpa_t pte_gpa;
 858
 859                        if (!sp->unsync)
 860                                break;
 861
 862                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 863                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 864
 865                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
 866                                kvm_flush_remote_tlbs(vcpu->kvm);
 867
 868                        if (!rmap_can_add(vcpu))
 869                                break;
 870
 871                        if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 872                                                       sizeof(pt_element_t)))
 873                                break;
 874
 875                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
 876                }
 877
 878                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 879                        break;
 880        }
 881        spin_unlock(&vcpu->kvm->mmu_lock);
 882}
 883
 884static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 885                               struct x86_exception *exception)
 886{
 887        struct guest_walker walker;
 888        gpa_t gpa = UNMAPPED_GVA;
 889        int r;
 890
 891        r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
 892
 893        if (r) {
 894                gpa = gfn_to_gpa(walker.gfn);
 895                gpa |= vaddr & ~PAGE_MASK;
 896        } else if (exception)
 897                *exception = walker.fault;
 898
 899        return gpa;
 900}
 901
 902#if PTTYPE != PTTYPE_EPT
 903static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
 904                                      u32 access,
 905                                      struct x86_exception *exception)
 906{
 907        struct guest_walker walker;
 908        gpa_t gpa = UNMAPPED_GVA;
 909        int r;
 910
 911        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
 912
 913        if (r) {
 914                gpa = gfn_to_gpa(walker.gfn);
 915                gpa |= vaddr & ~PAGE_MASK;
 916        } else if (exception)
 917                *exception = walker.fault;
 918
 919        return gpa;
 920}
 921#endif
 922
 923/*
 924 * Using the cached information from sp->gfns is safe because:
 925 * - The spte has a reference to the struct page, so the pfn for a given gfn
 926 *   can't change unless all sptes pointing to it are nuked first.
 927 *
 928 * Note:
  929 *   All TLBs should be flushed whenever an spte is dropped, even though
  930 *   the guest is responsible for it.  Otherwise kvm_mmu_notifier_invalidate_page
  931 *   and kvm_mmu_notifier_invalidate_range_start may see that the page is no
  932 *   longer mapped for the guest, skip the TLB flush, and leave the guest
  933 *   able to access the freed pages.
  934 *   kvm->tlbs_dirty is increased instead to delay that flush.
 935 */
 936static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 937{
 938        int i, nr_present = 0;
 939        bool host_writable;
 940        gpa_t first_pte_gpa;
 941
 942        /* direct kvm_mmu_page can not be unsync. */
 943        BUG_ON(sp->role.direct);
 944
 945        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
 946
 947        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
 948                unsigned pte_access;
 949                pt_element_t gpte;
 950                gpa_t pte_gpa;
 951                gfn_t gfn;
 952
 953                if (!sp->spt[i])
 954                        continue;
 955
 956                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);
 957
 958                if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
 959                                               sizeof(pt_element_t)))
 960                        return 0;
 961
 962                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
 963                        /*
 964                         * Update spte before increasing tlbs_dirty to make
 965                         * sure no tlb flush is lost after spte is zapped; see
 966                         * the comments in kvm_flush_remote_tlbs().
 967                         */
 968                        smp_wmb();
 969                        vcpu->kvm->tlbs_dirty++;
 970                        continue;
 971                }
 972
 973                gfn = gpte_to_gfn(gpte);
 974                pte_access = sp->role.access;
 975                pte_access &= FNAME(gpte_access)(vcpu, gpte);
 976                FNAME(protect_clean_gpte)(&pte_access, gpte);
 977
 978                if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
 979                      &nr_present))
 980                        continue;
 981
 982                if (gfn != sp->gfns[i]) {
 983                        drop_spte(vcpu->kvm, &sp->spt[i]);
 984                        /*
 985                         * The same as above where we are doing
 986                         * prefetch_invalid_gpte().
 987                         */
 988                        smp_wmb();
 989                        vcpu->kvm->tlbs_dirty++;
 990                        continue;
 991                }
 992
 993                nr_present++;
 994
 995                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
 996
 997                set_spte(vcpu, &sp->spt[i], pte_access,
 998                         PT_PAGE_TABLE_LEVEL, gfn,
 999                         spte_to_pfn(sp->spt[i]), true, false,
1000                         host_writable);
1001        }
1002
1003        return nr_present;
1004}
1005
1006#undef pt_element_t
1007#undef guest_walker
1008#undef FNAME
1009#undef PT_BASE_ADDR_MASK
1010#undef PT_INDEX
1011#undef PT_LVL_ADDR_MASK
1012#undef PT_LVL_OFFSET_MASK
1013#undef PT_LEVEL_BITS
1014#undef PT_MAX_FULL_LEVELS
1015#undef gpte_to_gfn
1016#undef gpte_to_gfn_lvl
1017#undef CMPXCHG
1018#undef PT_GUEST_ACCESSED_MASK
1019#undef PT_GUEST_DIRTY_MASK
1020#undef PT_GUEST_DIRTY_SHIFT
1021#undef PT_GUEST_ACCESSED_SHIFT
1022