linux/arch/x86/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

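/*
 * FNAME() prefixes each function with paging64_ or paging32_, so the two
 * instantiations produce distinct symbols and can coexist in one object file.
 */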
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
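 * Every level visited during the walk is recorded (table_gfn, ptes, pte_gpa,
 * ptep_user) so that the accessed/dirty update and the later consistency
 * checks can revisit it; pt_access and pte_access hold the accumulated and
 * final access permissions of the translation.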
 */
struct guest_walker {
        int level;
        unsigned max_level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};

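/* Extract the gfn that a guest pte maps at the given page-table level. */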
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

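/*
 * Atomically update a guest pte that lives in host user memory: pin the
 * backing page, cmpxchg the new value in, and report whether the pte had
 * changed under us (so callers can restart the walk).
 */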
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               pt_element_t __user *ptep_user, unsigned index,
                               pt_element_t orig_pte, pt_element_t new_pte)
{
        int npages;
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
        /* If the guest pte isn't backed by a single writable host page, the access is bogus. */
        if (unlikely(npages != 1))
                return -EFAULT;

        table = kmap_atomic(page);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}

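/*
 * Mirror what the hardware walker does to the guest page tables: set the
 * accessed bit at every level of the walk and, for a write fault, the dirty
 * bit on the final pte.  Returns a negative value on error, a positive value
 * if a pte changed under us (the caller should restart the walk), and 0 on
 * success.
 */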
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
                                             int write_fault)
{
        unsigned level, index;
        pt_element_t pte, orig_pte;
        pt_element_t __user *ptep_user;
        gfn_t table_gfn;
        int ret;

        for (level = walker->max_level; level >= walker->level; --level) {
                pte = orig_pte = walker->ptes[level - 1];
                table_gfn = walker->table_gfn[level - 1];
                ptep_user = walker->ptep_user[level - 1];
                index = offset_in_page(ptep_user) / sizeof(pt_element_t);
                if (!(pte & PT_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_ACCESSED_MASK;
                }
                if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                        pte |= PT_DIRTY_MASK;
                }
                if (pte == orig_pte)
                        continue;

                ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
                if (ret)
                        return ret;

                mark_page_dirty(vcpu->kvm, table_gfn);
                walker->ptes[level - 1] = pte;
        }
        return 0;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gva_t addr, u32 access)
{
        int ret;
        pt_element_t pte;
        pt_element_t __user *uninitialized_var(ptep_user);
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access, accessed_dirty;
        gpa_t pte_gpa;
        int offset;
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;
        u16 errcode = 0;
        gpa_t real_gpa;
        gfn_t gfn;

        trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
        walker->level = mmu->root_level;
        pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte))
                        goto error;
                --walker->level;
        }
#endif
        walker->max_level = walker->level;
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

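        /*
         * accessed_dirty accumulates the accessed bits of every pte in the
         * walk; it ends up zero iff some level still needs its accessed (or,
         * for a write fault, dirty) bit set.  The level is pre-incremented
         * here because the loop below decrements it before its first use.
         */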
        accessed_dirty = PT_ACCESSED_MASK;
        pt_access = pte_access = ACC_ALL;
        ++walker->level;

        do {
                gfn_t real_gfn;
                unsigned long host_addr;

                pt_access &= pte_access;
                --walker->level;

                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                offset    = index * sizeof(pt_element_t);
                pte_gpa   = gfn_to_gpa(table_gfn) + offset;
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

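                /*
                 * Translate the table's gpa through the parent translation
                 * (a no-op unless this is a nested walk) and turn it into a
                 * host virtual address so the pte can be read directly.
                 */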
                real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              PFERR_USER_MASK|PFERR_WRITE_MASK);
                if (unlikely(real_gfn == UNMAPPED_GVA))
                        goto error;
                real_gfn = gpa_to_gfn(real_gfn);

                host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;

                ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
                if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
                        goto error;
                walker->ptep_user[walker->level - 1] = ptep_user;

                trace_kvm_mmu_paging_element(pte, walker->level);

                if (unlikely(!is_present_gpte(pte)))
                        goto error;

                if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
                                              walker->level))) {
                        errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
                        goto error;
                }

                accessed_dirty &= pte;
                pte_access = pt_access & gpte_access(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;
        } while (!is_last_gpte(mmu, walker->level, pte));

        if (unlikely(permission_fault(mmu, pte_access, access))) {
                errcode |= PFERR_PRESENT_MASK;
                goto error;
        }

        gfn = gpte_to_gfn_lvl(pte, walker->level);
        gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

        if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
                gfn += pse36_gfn_delta(pte);

        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
        if (real_gpa == UNMAPPED_GVA)
                return 0;

        walker->gfn = real_gpa >> PAGE_SHIFT;

        if (!write_fault)
                protect_clean_gpte(&pte_access, pte);
        else
                /*
                 * On a write fault, fold the dirty bit into accessed_dirty by
                 * shifting it one place right.
                 */
                accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);

        if (unlikely(!accessed_dirty)) {
                ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
                        goto retry_walk;
        }

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);
        return 1;

error:
        errcode |= write_fault | user_fault;
        if (fetch_fault && (mmu->nx ||
                            kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
                errcode |= PFERR_FETCH_MASK;

        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = errcode;
        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}

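/*
 * Walk the guest page tables using the vcpu's primary mmu context; the
 * _nested variant below uses the nested mmu context instead, for
 * translations performed while the vcpu is running a nested guest.
 */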
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
                                        access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
                                   struct kvm_vcpu *vcpu, gva_t addr,
                                   u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
                                        addr, access);
}

static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
        unsigned pte_access;
        gfn_t gfn;
        pfn_t pfn;

        if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
                return false;

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

        gfn = gpte_to_gfn(gpte);
        pte_access = sp->role.access & gpte_access(vcpu, gpte);
        protect_clean_gpte(&pte_access, gpte);
        pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                        no_dirty_log && (pte_access & ACC_WRITE_MASK));
        if (is_error_pfn(pfn))
                return false;

        /*
         * we call mmu_set_spte() with host_writable = true because
         * pte_prefetch_gfn_to_pfn always gets a writable pfn.
         */
        mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
                     gfn, pfn, true, true);

        return true;
}

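/*
 * Reflect a single guest pte into its shadow pte, e.g. after the guest has
 * modified the pte and we re-read it.
 */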
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte = *(const pt_element_t *)pte;

        FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}

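/*
 * Re-read the guest pte that was used at the given level of the walk and
 * report whether it changed (or could not be read) since the walk.  At the
 * last level the whole prefetch window is read, so pte_prefetch() below can
 * reuse the cached values.
 */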
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PT_PAGE_TABLE_LEVEL) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
                                  &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}

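/*
 * Speculatively map the guest ptes that surround the faulting one, reusing
 * the values that gpte_changed() cached in gw->prefetch_ptes.
 */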
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = page_header(__pa(sptep));

        if (sp->role.level > PT_PAGE_TABLE_LEVEL)
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                if (spte == sptep)
                        continue;

                if (is_shadow_present_pte(*spte))
                        continue;

                if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
                        break;
        }
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate that emulation is needed.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int write_fault, int hlevel,
                         pfn_t pfn, bool map_writable, bool prefault)
{
        struct kvm_mmu_page *sp = NULL;
        struct kvm_shadow_walk_iterator it;
        unsigned direct_access, access = gw->pt_access;
        int top_level, emulate = 0;

        direct_access = gw->pte_access;

        top_level = vcpu->arch.mmu.root_level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there.  Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
                gfn_t table_gfn;

                clear_sp_write_flooding_count(it.sptep);
                drop_large_spte(vcpu, it.sptep);

                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access, it.sptep);
                }

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                if (sp)
                        link_shadow_page(it.sptep, sp);
        }

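        /*
         * If the guest mapping is larger than what the host can back with a
         * single page (hlevel < gw->level), fill the remaining levels with
         * direct shadow pages.
         */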
        for (;
             shadow_walk_okay(&it) && it.level > hlevel;
             shadow_walk_next(&it)) {
                gfn_t direct_gfn;

                clear_sp_write_flooding_count(it.sptep);
                validate_direct_spte(vcpu, it.sptep, direct_access);

                drop_large_spte(vcpu, it.sptep);

                if (is_shadow_present_pte(*it.sptep))
                        continue;

                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
                                      true, direct_access, it.sptep);
                link_shadow_page(it.sptep, sp);
        }

        clear_sp_write_flooding_count(it.sptep);
        mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
                     it.level, gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);

        return emulate;

out_gpte_changed:
        if (sp)
                kvm_mmu_put_page(sp, it.sptep);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/*
 * Check whether the faulting gfn shares a (potential) large-page mapping
 * with one of the guest page tables that were used to translate it, i.e.
 * whether the guest is writing to its own page tables through the current
 * mapping.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is itself in use as a page table, we force kvm
 * to map it with small pages: shadowing that page table would create a new
 * shadow page and break the large mapping anyway, so doing it early avoids
 * unnecessary #PFs and emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently used as a page table of the faulting address itself.
 *
 * Note: the PDPT page is not checked for PAE 32-bit guests.  That is fine
 * because the PDPT is always shadowed, which already prevents a large page
 * from being used to map the gfn that holds the PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
                              struct guest_walker *walker, int user_fault,
                              bool *write_fault_to_shadow_pgtable)
{
        int level;
        gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
        bool self_changed = false;

        if (!(walker->pte_access & ACC_WRITE_MASK ||
              (!is_write_protection(vcpu) && !user_fault)))
                return false;

        for (level = walker->level; level <= walker->max_level; level++) {
                gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

                self_changed |= !(gfn & mask);
                *write_fault_to_shadow_pgtable |= !gfn;
        }

        return self_changed;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                             bool prefault)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        int r;
        pfn_t pfn;
        int level = PT_PAGE_TABLE_LEVEL;
        int force_pt_level;
        unsigned long mmu_seq;
        bool map_writable, is_self_change_mapping;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

        if (unlikely(error_code & PFERR_RSVD_MASK))
                return handle_mmio_page_fault(vcpu, addr, error_code,
                                              mmu_is_nested(vcpu));

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                if (!prefault)
                        inject_page_fault(vcpu, &walker.fault);

                return 0;
        }

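        /*
         * Detect a guest writing to its own page tables (see the comment
         * above FNAME(is_self_change_mapping)): such mappings, like gfns
         * that are being dirty-logged, must use 4k pages.
         */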
        vcpu->arch.write_fault_to_shadow_pgtable = false;

        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
              &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

        if (walker.level >= PT_DIRECTORY_LEVEL)
                force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
                   || is_self_change_mapping;
        else
                force_pt_level = 1;
        if (!force_pt_level) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
        }

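        /*
         * Snapshot the mmu notifier sequence count before possibly sleeping
         * in try_async_pf(); mmu_notifier_retry() rechecks it under mmu_lock
         * below so we never install a pfn that was invalidated meanwhile.
         */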
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();

        if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
                         &map_writable))
                return 0;

        if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
                                walker.gfn, pfn, walker.pte_access, &r))
                return r;

        /*
         * Do not change pte_access if the pfn is a mmio page, otherwise
         * we will cache the incorrect access into mmio spte.
         */
        if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
             !is_write_protection(vcpu) && !user_fault &&
              !is_noslot_pfn(pfn)) {
                walker.pte_access |= ACC_WRITE_MASK;
                walker.pte_access &= ~ACC_USER_MASK;

                /*
                 * If we converted a user page to a kernel page,
                 * so that the kernel can write to it when cr0.wp=0,
                 * then we should prevent the kernel from executing it
                 * if SMEP is enabled.
                 */
                if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
                        walker.pte_access &= ~ACC_EXEC_MASK;
        }

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;

        kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        make_mmu_pages_available(vcpu);
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
        r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
                         level, pfn, map_writable, prefault);
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return r;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

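/*
 * Return the gpa of the first guest pte covered by this last-level shadow
 * page; for 32-bit guests the quadrant selects which part of the guest page
 * table the shadow page maps.
 */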
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
        int offset = 0;

        WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

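/*
 * Handle a guest INVLPG: if the last-level shadow page mapping gva is
 * unsync, zap its shadow pte and immediately re-read the guest pte to keep
 * the shadow in sync.
 */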
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        int level;
        u64 *sptep;

        vcpu_clear_mmio_info(vcpu, gva);

        /*
         * No need to check the return value here: if the topup fails,
         * rmap_can_add() will make us skip the pte prefetch later on.
         */
        mmu_topup_memory_caches(vcpu);

        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
                        pt_element_t gpte;
                        gpa_t pte_gpa;

                        if (!sp->unsync)
                                break;

                        pte_gpa = FNAME(get_level1_sp_gpa)(sp);
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
                                kvm_flush_remote_tlbs(vcpu->kvm);

                        if (!rmap_can_add(vcpu))
                                break;

                        if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                                  sizeof(pt_element_t)))
                                break;

                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);
}

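/*
 * Translate a guest virtual address to a guest physical address; on failure
 * the fault to be injected is returned in *exception.
 */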
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
                                      u32 access,
                                      struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if a spte is dropped, even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may conclude that the page is no
 *   longer used by the guest, skip the tlb flush, and leave the guest able
 *   to access the freed pages.
 *   We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int i, nr_present = 0;
        bool host_writable;
        gpa_t first_pte_gpa;

        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);

        first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn;

                if (!sp->spt[i])
                        continue;

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;

                if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                gfn = gpte_to_gfn(gpte);
                pte_access = sp->role.access;
                pte_access &= gpte_access(vcpu, gpte);
                protect_clean_gpte(&pte_access, gpte);

                if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
                        continue;

                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i]);
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                nr_present++;

                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

                set_spte(vcpu, &sp->spt[i], pte_access,
                         PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
                         host_writable);
        }

        return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG