linux/arch/x86/kvm/mmu.c
   1/*
   2 * Kernel-based Virtual Machine driver for Linux
   3 *
   4 * This module enables machines with Intel VT-x extensions to run virtual
   5 * machines without emulation or binary translation.
   6 *
   7 * MMU support
   8 *
   9 * Copyright (C) 2006 Qumranet, Inc.
  10 *
  11 * Authors:
  12 *   Yaniv Kamay  <yaniv@qumranet.com>
  13 *   Avi Kivity   <avi@qumranet.com>
  14 *
  15 * This work is licensed under the terms of the GNU GPL, version 2.  See
  16 * the COPYING file in the top-level directory.
  17 *
  18 */
  19
  20#include "mmu.h"
  21#include "kvm_cache_regs.h"
  22
  23#include <linux/kvm_host.h>
  24#include <linux/types.h>
  25#include <linux/string.h>
  26#include <linux/mm.h>
  27#include <linux/highmem.h>
  28#include <linux/module.h>
  29#include <linux/swap.h>
  30#include <linux/hugetlb.h>
  31#include <linux/compiler.h>
  32
  33#include <asm/page.h>
  34#include <asm/cmpxchg.h>
  35#include <asm/io.h>
  36#include <asm/vmx.h>
  37
   38/*
   39 * When set to true, this variable enables Two-Dimensional Paging,
   40 * where the hardware walks two page tables:
   41 * 1. the guest-virtual to guest-physical table
   42 * 2. while doing 1., the guest-physical to host-physical table
   43 * If the hardware supports that, we don't need to do shadow paging.
   44 */
  45bool tdp_enabled = false;
  46
  47#undef MMU_DEBUG
  48
  49#undef AUDIT
  50
  51#ifdef AUDIT
  52static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
  53#else
  54static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
  55#endif
  56
  57#ifdef MMU_DEBUG
  58
  59#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
  60#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
  61
  62#else
  63
  64#define pgprintk(x...) do { } while (0)
  65#define rmap_printk(x...) do { } while (0)
  66
  67#endif
  68
  69#if defined(MMU_DEBUG) || defined(AUDIT)
  70static int dbg = 0;
  71module_param(dbg, bool, 0644);
  72#endif
  73
  74static int oos_shadow = 1;
  75module_param(oos_shadow, bool, 0644);
  76
  77#ifndef MMU_DEBUG
  78#define ASSERT(x) do { } while (0)
  79#else
  80#define ASSERT(x)                                                       \
  81        if (!(x)) {                                                     \
  82                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
  83                       __FILE__, __LINE__, #x);                         \
  84        }
  85#endif
  86
  87#define PT_FIRST_AVAIL_BITS_SHIFT 9
  88#define PT64_SECOND_AVAIL_BITS_SHIFT 52
  89
  90#define VALID_PAGE(x) ((x) != INVALID_PAGE)
  91
  92#define PT64_LEVEL_BITS 9
  93
  94#define PT64_LEVEL_SHIFT(level) \
  95                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
  96
  97#define PT64_LEVEL_MASK(level) \
  98                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
  99
 100#define PT64_INDEX(address, level)\
 101        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
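     /*
      * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): each 64-bit
      * level consumes 9 address bits, so PT64_LEVEL_SHIFT() is 12, 21, 30, 39
      * for levels 1..4, and PT64_INDEX() extracts the 9 bits above that shift.
      * For addr == 0x40000000 (1 GiB) that gives index 1 at level 3 and
      * index 0 at levels 2 and 1.
      */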
 102
 103
 104#define PT32_LEVEL_BITS 10
 105
 106#define PT32_LEVEL_SHIFT(level) \
 107                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
 108
 109#define PT32_LEVEL_MASK(level) \
 110                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
 111#define PT32_LVL_OFFSET_MASK(level) \
 112        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
 113                                                * PT32_LEVEL_BITS))) - 1))
 114
 115#define PT32_INDEX(address, level)\
 116        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 117
 118
 119#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 120#define PT64_DIR_BASE_ADDR_MASK \
 121        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 122#define PT64_LVL_ADDR_MASK(level) \
 123        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
 124                                                * PT64_LEVEL_BITS))) - 1))
 125#define PT64_LVL_OFFSET_MASK(level) \
 126        (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
 127                                                * PT64_LEVEL_BITS))) - 1))
 128
 129#define PT32_BASE_ADDR_MASK PAGE_MASK
 130#define PT32_DIR_BASE_ADDR_MASK \
 131        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
 132#define PT32_LVL_ADDR_MASK(level) \
 133        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
 134                                            * PT32_LEVEL_BITS))) - 1))
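     /*
      * For the 2 MiB (level 2) case with 4 KiB base pages this works out to:
      * PT64_LVL_ADDR_MASK(2) keeps physical-address bits 21..51 (the large
      * frame) and PT64_LVL_OFFSET_MASK(2) keeps bits 12..20 (the 4 KiB page
      * within that frame).
      */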
 135
 136#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
 137                        | PT64_NX_MASK)
 138
 139#define PFERR_PRESENT_MASK (1U << 0)
 140#define PFERR_WRITE_MASK (1U << 1)
 141#define PFERR_USER_MASK (1U << 2)
 142#define PFERR_RSVD_MASK (1U << 3)
 143#define PFERR_FETCH_MASK (1U << 4)
 144
 145#define PT_PDPE_LEVEL 3
 146#define PT_DIRECTORY_LEVEL 2
 147#define PT_PAGE_TABLE_LEVEL 1
 148
 149#define RMAP_EXT 4
 150
 151#define ACC_EXEC_MASK    1
 152#define ACC_WRITE_MASK   PT_WRITABLE_MASK
 153#define ACC_USER_MASK    PT_USER_MASK
 154#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 155
 156#define CREATE_TRACE_POINTS
 157#include "mmutrace.h"
 158
 159#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
 160
 161#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 162
 163struct kvm_rmap_desc {
 164        u64 *sptes[RMAP_EXT];
 165        struct kvm_rmap_desc *more;
 166};
 167
 168struct kvm_shadow_walk_iterator {
 169        u64 addr;
 170        hpa_t shadow_addr;
 171        int level;
 172        u64 *sptep;
 173        unsigned index;
 174};
 175
 176#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
 177        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
 178             shadow_walk_okay(&(_walker));                      \
 179             shadow_walk_next(&(_walker)))
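     /*
      * A minimal usage sketch (the walker is named by value, not passed as a
      * pointer; handle_spte() here is only a placeholder):
      *
      *     struct kvm_shadow_walk_iterator iterator;
      *
      *     for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator)
      *             handle_spte(iterator.level, iterator.sptep);
      */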
 180
 181
 182struct kvm_unsync_walk {
 183        int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
 184};
 185
 186typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
 187
 188static struct kmem_cache *pte_chain_cache;
 189static struct kmem_cache *rmap_desc_cache;
 190static struct kmem_cache *mmu_page_header_cache;
 191
 192static u64 __read_mostly shadow_trap_nonpresent_pte;
 193static u64 __read_mostly shadow_notrap_nonpresent_pte;
 194static u64 __read_mostly shadow_base_present_pte;
 195static u64 __read_mostly shadow_nx_mask;
  196static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
 197static u64 __read_mostly shadow_user_mask;
 198static u64 __read_mostly shadow_accessed_mask;
 199static u64 __read_mostly shadow_dirty_mask;
 200
 201static inline u64 rsvd_bits(int s, int e)
 202{
 203        return ((1ULL << (e - s + 1)) - 1) << s;
 204}
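     /*
      * For example, rsvd_bits(52, 62) covers the 11 bits 52..62 inclusive:
      * ((1ULL << 11) - 1) << 52 == 0x7ff0000000000000ULL.
      */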
 205
 206void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 207{
 208        shadow_trap_nonpresent_pte = trap_pte;
 209        shadow_notrap_nonpresent_pte = notrap_pte;
 210}
 211EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 212
 213void kvm_mmu_set_base_ptes(u64 base_pte)
 214{
 215        shadow_base_present_pte = base_pte;
 216}
 217EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 218
 219void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 220                u64 dirty_mask, u64 nx_mask, u64 x_mask)
 221{
 222        shadow_user_mask = user_mask;
 223        shadow_accessed_mask = accessed_mask;
 224        shadow_dirty_mask = dirty_mask;
 225        shadow_nx_mask = nx_mask;
 226        shadow_x_mask = x_mask;
 227}
 228EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
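     /*
      * These setters let the vendor module describe its spte format.  With
      * EPT, for example, the accessed and dirty masks may be zero, which is
      * why kvm_age_rmapp() below simply reports "old" whenever
      * shadow_accessed_mask == 0.
      */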
 229
 230static int is_write_protection(struct kvm_vcpu *vcpu)
 231{
 232        return vcpu->arch.cr0 & X86_CR0_WP;
 233}
 234
 235static int is_cpuid_PSE36(void)
 236{
 237        return 1;
 238}
 239
 240static int is_nx(struct kvm_vcpu *vcpu)
 241{
 242        return vcpu->arch.shadow_efer & EFER_NX;
 243}
 244
 245static int is_shadow_present_pte(u64 pte)
 246{
 247        return pte != shadow_trap_nonpresent_pte
 248                && pte != shadow_notrap_nonpresent_pte;
 249}
 250
 251static int is_large_pte(u64 pte)
 252{
 253        return pte & PT_PAGE_SIZE_MASK;
 254}
 255
 256static int is_writeble_pte(unsigned long pte)
 257{
 258        return pte & PT_WRITABLE_MASK;
 259}
 260
 261static int is_dirty_gpte(unsigned long pte)
 262{
 263        return pte & PT_DIRTY_MASK;
 264}
 265
 266static int is_rmap_spte(u64 pte)
 267{
 268        return is_shadow_present_pte(pte);
 269}
 270
 271static int is_last_spte(u64 pte, int level)
 272{
 273        if (level == PT_PAGE_TABLE_LEVEL)
 274                return 1;
 275        if (is_large_pte(pte))
 276                return 1;
 277        return 0;
 278}
 279
 280static pfn_t spte_to_pfn(u64 pte)
 281{
 282        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 283}
 284
 285static gfn_t pse36_gfn_delta(u32 gpte)
 286{
 287        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
 288
 289        return (gpte & PT32_DIR_PSE36_MASK) << shift;
 290}
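     /*
      * Under PSE-36, bits 13 and up of a 4 MiB guest pde supply physical
      * address bits 32 and up.  The shift above is 32 - 13 - 12 == 7
      * (assuming the usual PT32_DIR_PSE36_SHIFT of 13), which moves those
      * bits to gfn bit 20, i.e. back to physical address bit 32.
      */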
 291
 292static void __set_spte(u64 *sptep, u64 spte)
 293{
 294#ifdef CONFIG_X86_64
 295        set_64bit((unsigned long *)sptep, spte);
 296#else
 297        set_64bit((unsigned long long *)sptep, spte);
 298#endif
 299}
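     /*
      * set_64bit() performs a single atomic 64-bit store even on 32-bit
      * hosts, so neither the hardware page walker nor another vcpu can ever
      * observe a half-written spte.
      */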
 300
 301static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 302                                  struct kmem_cache *base_cache, int min)
 303{
 304        void *obj;
 305
 306        if (cache->nobjs >= min)
 307                return 0;
 308        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 309                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
 310                if (!obj)
 311                        return -ENOMEM;
 312                cache->objects[cache->nobjs++] = obj;
 313        }
 314        return 0;
 315}
 316
 317static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 318{
 319        while (mc->nobjs)
 320                kfree(mc->objects[--mc->nobjs]);
 321}
 322
 323static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
 324                                       int min)
 325{
 326        struct page *page;
 327
 328        if (cache->nobjs >= min)
 329                return 0;
 330        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
 331                page = alloc_page(GFP_KERNEL);
 332                if (!page)
 333                        return -ENOMEM;
 334                set_page_private(page, 0);
 335                cache->objects[cache->nobjs++] = page_address(page);
 336        }
 337        return 0;
 338}
 339
 340static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
 341{
 342        while (mc->nobjs)
 343                free_page((unsigned long)mc->objects[--mc->nobjs]);
 344}
 345
 346static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 347{
 348        int r;
 349
 350        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
 351                                   pte_chain_cache, 4);
 352        if (r)
 353                goto out;
 354        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
 355                                   rmap_desc_cache, 4);
 356        if (r)
 357                goto out;
 358        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
 359        if (r)
 360                goto out;
 361        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
 362                                   mmu_page_header_cache, 4);
 363out:
 364        return r;
 365}
 366
 367static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 368{
 369        mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
 370        mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
 371        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
 372        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 373}
 374
 375static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 376                                    size_t size)
 377{
 378        void *p;
 379
 380        BUG_ON(!mc->nobjs);
 381        p = mc->objects[--mc->nobjs];
 382        return p;
 383}
 384
 385static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 386{
 387        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
 388                                      sizeof(struct kvm_pte_chain));
 389}
 390
 391static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 392{
 393        kfree(pc);
 394}
 395
 396static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 397{
 398        return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
 399                                      sizeof(struct kvm_rmap_desc));
 400}
 401
 402static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
 403{
 404        kfree(rd);
 405}
 406
 407/*
 408 * Return the pointer to the largepage write count for a given
 409 * gfn, handling slots that are not large page aligned.
 410 */
 411static int *slot_largepage_idx(gfn_t gfn,
 412                               struct kvm_memory_slot *slot,
 413                               int level)
 414{
 415        unsigned long idx;
 416
 417        idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
 418              (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
 419        return &slot->lpage_info[level - 2][idx].write_count;
 420}
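     /*
      * Worked example, assuming 4 KiB base pages and 2 MiB large pages so
      * that KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) == 512: in a slot with
      * base_gfn == 0x10000, gfn == 0x12345 maps to idx == 0x91 - 0x80 == 17,
      * the slot's eighteenth large-page region.
      */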
 421
 422static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 423{
 424        struct kvm_memory_slot *slot;
 425        int *write_count;
 426        int i;
 427
 428        gfn = unalias_gfn(kvm, gfn);
 429
 430        slot = gfn_to_memslot_unaliased(kvm, gfn);
 431        for (i = PT_DIRECTORY_LEVEL;
 432             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 433                write_count   = slot_largepage_idx(gfn, slot, i);
 434                *write_count += 1;
 435        }
 436}
 437
 438static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 439{
 440        struct kvm_memory_slot *slot;
 441        int *write_count;
 442        int i;
 443
 444        gfn = unalias_gfn(kvm, gfn);
 445        for (i = PT_DIRECTORY_LEVEL;
 446             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 447                slot          = gfn_to_memslot_unaliased(kvm, gfn);
 448                write_count   = slot_largepage_idx(gfn, slot, i);
 449                *write_count -= 1;
 450                WARN_ON(*write_count < 0);
 451        }
 452}
 453
 454static int has_wrprotected_page(struct kvm *kvm,
 455                                gfn_t gfn,
 456                                int level)
 457{
 458        struct kvm_memory_slot *slot;
 459        int *largepage_idx;
 460
 461        gfn = unalias_gfn(kvm, gfn);
 462        slot = gfn_to_memslot_unaliased(kvm, gfn);
 463        if (slot) {
 464                largepage_idx = slot_largepage_idx(gfn, slot, level);
 465                return *largepage_idx;
 466        }
 467
 468        return 1;
 469}
 470
 471static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 472{
 473        unsigned long page_size = PAGE_SIZE;
 474        struct vm_area_struct *vma;
 475        unsigned long addr;
 476        int i, ret = 0;
 477
 478        addr = gfn_to_hva(kvm, gfn);
 479        if (kvm_is_error_hva(addr))
  480                return PT_PAGE_TABLE_LEVEL;
 481
 482        down_read(&current->mm->mmap_sem);
 483        vma = find_vma(current->mm, addr);
 484        if (!vma)
 485                goto out;
 486
 487        page_size = vma_kernel_pagesize(vma);
 488
 489out:
 490        up_read(&current->mm->mmap_sem);
 491
 492        for (i = PT_PAGE_TABLE_LEVEL;
 493             i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
 494                if (page_size >= KVM_HPAGE_SIZE(i))
 495                        ret = i;
 496                else
 497                        break;
 498        }
 499
 500        return ret;
 501}
 502
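     /*
      * mapping_level() below picks the mapping size for a gfn: the largest
      * level, bounded by the host-side mapping, whose large frame contains
      * no write-protected (shadowed) page; slots with dirty logging enabled
      * are always mapped at PT_PAGE_TABLE_LEVEL.
      */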
 503static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 504{
 505        struct kvm_memory_slot *slot;
 506        int host_level;
 507        int level = PT_PAGE_TABLE_LEVEL;
 508
 509        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
 510        if (slot && slot->dirty_bitmap)
 511                return PT_PAGE_TABLE_LEVEL;
 512
 513        host_level = host_mapping_level(vcpu->kvm, large_gfn);
 514
 515        if (host_level == PT_PAGE_TABLE_LEVEL)
 516                return host_level;
 517
 518        for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
 519
 520                if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
 521                        break;
 522        }
 523
 524        return level - 1;
 525}
 526
 527/*
 528 * Take gfn and return the reverse mapping to it.
  529 * Note: gfn must be unaliased before this function gets called
 530 */
 531
 532static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 533{
 534        struct kvm_memory_slot *slot;
 535        unsigned long idx;
 536
 537        slot = gfn_to_memslot(kvm, gfn);
 538        if (likely(level == PT_PAGE_TABLE_LEVEL))
 539                return &slot->rmap[gfn - slot->base_gfn];
 540
 541        idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
 542                (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
 543
 544        return &slot->lpage_info[level - 2][idx].rmap_pde;
 545}
 546
 547/*
 548 * Reverse mapping data structures:
 549 *
  550 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 551 * that points to page_address(page).
 552 *
  553 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 554 * containing more mappings.
 555 *
 556 * Returns the number of rmap entries before the spte was added or zero if
 557 * the spte was not added.
 558 *
 559 */
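     /*
      * Layout sketch of the two encodings built below:
      *
      *     one mapping:    *rmapp == (unsigned long)sptep
      *     many mappings:  *rmapp == (unsigned long)desc | 1, where
      *                     desc->sptes[0..RMAP_EXT-1] fill up in order and
      *                     desc->more chains further descriptors.
      */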
 560static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 561{
 562        struct kvm_mmu_page *sp;
 563        struct kvm_rmap_desc *desc;
 564        unsigned long *rmapp;
 565        int i, count = 0;
 566
 567        if (!is_rmap_spte(*spte))
 568                return count;
 569        gfn = unalias_gfn(vcpu->kvm, gfn);
 570        sp = page_header(__pa(spte));
 571        sp->gfns[spte - sp->spt] = gfn;
 572        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 573        if (!*rmapp) {
 574                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
 575                *rmapp = (unsigned long)spte;
 576        } else if (!(*rmapp & 1)) {
 577                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 578                desc = mmu_alloc_rmap_desc(vcpu);
 579                desc->sptes[0] = (u64 *)*rmapp;
 580                desc->sptes[1] = spte;
 581                *rmapp = (unsigned long)desc | 1;
 582        } else {
 583                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 584                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 585                while (desc->sptes[RMAP_EXT-1] && desc->more) {
 586                        desc = desc->more;
 587                        count += RMAP_EXT;
 588                }
 589                if (desc->sptes[RMAP_EXT-1]) {
 590                        desc->more = mmu_alloc_rmap_desc(vcpu);
 591                        desc = desc->more;
 592                }
 593                for (i = 0; desc->sptes[i]; ++i)
 594                        ;
 595                desc->sptes[i] = spte;
 596        }
 597        return count;
 598}
 599
 600static void rmap_desc_remove_entry(unsigned long *rmapp,
 601                                   struct kvm_rmap_desc *desc,
 602                                   int i,
 603                                   struct kvm_rmap_desc *prev_desc)
 604{
 605        int j;
 606
 607        for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
 608                ;
 609        desc->sptes[i] = desc->sptes[j];
 610        desc->sptes[j] = NULL;
 611        if (j != 0)
 612                return;
 613        if (!prev_desc && !desc->more)
 614                *rmapp = (unsigned long)desc->sptes[0];
 615        else
 616                if (prev_desc)
 617                        prev_desc->more = desc->more;
 618                else
 619                        *rmapp = (unsigned long)desc->more | 1;
 620        mmu_free_rmap_desc(desc);
 621}
 622
 623static void rmap_remove(struct kvm *kvm, u64 *spte)
 624{
 625        struct kvm_rmap_desc *desc;
 626        struct kvm_rmap_desc *prev_desc;
 627        struct kvm_mmu_page *sp;
 628        pfn_t pfn;
 629        unsigned long *rmapp;
 630        int i;
 631
 632        if (!is_rmap_spte(*spte))
 633                return;
 634        sp = page_header(__pa(spte));
 635        pfn = spte_to_pfn(*spte);
 636        if (*spte & shadow_accessed_mask)
 637                kvm_set_pfn_accessed(pfn);
 638        if (is_writeble_pte(*spte))
 639                kvm_set_pfn_dirty(pfn);
 640        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
 641        if (!*rmapp) {
 642                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 643                BUG();
 644        } else if (!(*rmapp & 1)) {
 645                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
 646                if ((u64 *)*rmapp != spte) {
 647                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
 648                               spte, *spte);
 649                        BUG();
 650                }
 651                *rmapp = 0;
 652        } else {
 653                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
 654                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 655                prev_desc = NULL;
 656                while (desc) {
 657                        for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
 658                                if (desc->sptes[i] == spte) {
 659                                        rmap_desc_remove_entry(rmapp,
 660                                                               desc, i,
 661                                                               prev_desc);
 662                                        return;
 663                                }
 664                        prev_desc = desc;
 665                        desc = desc->more;
 666                }
 667                BUG();
 668        }
 669}
 670
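     /*
      * rmap_next() iterates an rmap chain: pass NULL to fetch the first spte
      * and the previous return value to fetch the next; NULL marks the end.
      * A minimal sketch of the loop the callers below use (handle() is only
      * a placeholder):
      *
      *     for (spte = rmap_next(kvm, rmapp, NULL); spte;
      *          spte = rmap_next(kvm, rmapp, spte))
      *             handle(spte);
      */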
 671static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 672{
 673        struct kvm_rmap_desc *desc;
 674        struct kvm_rmap_desc *prev_desc;
 675        u64 *prev_spte;
 676        int i;
 677
 678        if (!*rmapp)
 679                return NULL;
 680        else if (!(*rmapp & 1)) {
 681                if (!spte)
 682                        return (u64 *)*rmapp;
 683                return NULL;
 684        }
 685        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 686        prev_desc = NULL;
 687        prev_spte = NULL;
 688        while (desc) {
 689                for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
 690                        if (prev_spte == spte)
 691                                return desc->sptes[i];
 692                        prev_spte = desc->sptes[i];
 693                }
 694                desc = desc->more;
 695        }
 696        return NULL;
 697}
 698
 699static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 700{
 701        unsigned long *rmapp;
 702        u64 *spte;
 703        int i, write_protected = 0;
 704
 705        gfn = unalias_gfn(kvm, gfn);
 706        rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 707
 708        spte = rmap_next(kvm, rmapp, NULL);
 709        while (spte) {
 710                BUG_ON(!spte);
 711                BUG_ON(!(*spte & PT_PRESENT_MASK));
 712                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 713                if (is_writeble_pte(*spte)) {
 714                        __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 715                        write_protected = 1;
 716                }
 717                spte = rmap_next(kvm, rmapp, spte);
 718        }
 719        if (write_protected) {
 720                pfn_t pfn;
 721
 722                spte = rmap_next(kvm, rmapp, NULL);
 723                pfn = spte_to_pfn(*spte);
 724                kvm_set_pfn_dirty(pfn);
 725        }
 726
 727        /* check for huge page mappings */
 728        for (i = PT_DIRECTORY_LEVEL;
 729             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 730                rmapp = gfn_to_rmap(kvm, gfn, i);
 731                spte = rmap_next(kvm, rmapp, NULL);
 732                while (spte) {
 733                        BUG_ON(!spte);
 734                        BUG_ON(!(*spte & PT_PRESENT_MASK));
 735                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
 736                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
 737                        if (is_writeble_pte(*spte)) {
 738                                rmap_remove(kvm, spte);
 739                                --kvm->stat.lpages;
 740                                __set_spte(spte, shadow_trap_nonpresent_pte);
 741                                spte = NULL;
 742                                write_protected = 1;
 743                        }
 744                        spte = rmap_next(kvm, rmapp, spte);
 745                }
 746        }
 747
 748        return write_protected;
 749}
 750
 751static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 752                           unsigned long data)
 753{
 754        u64 *spte;
 755        int need_tlb_flush = 0;
 756
 757        while ((spte = rmap_next(kvm, rmapp, NULL))) {
 758                BUG_ON(!(*spte & PT_PRESENT_MASK));
 759                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
 760                rmap_remove(kvm, spte);
 761                __set_spte(spte, shadow_trap_nonpresent_pte);
 762                need_tlb_flush = 1;
 763        }
 764        return need_tlb_flush;
 765}
 766
 767static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 768                             unsigned long data)
 769{
 770        int need_flush = 0;
 771        u64 *spte, new_spte;
 772        pte_t *ptep = (pte_t *)data;
 773        pfn_t new_pfn;
 774
 775        WARN_ON(pte_huge(*ptep));
 776        new_pfn = pte_pfn(*ptep);
 777        spte = rmap_next(kvm, rmapp, NULL);
 778        while (spte) {
 779                BUG_ON(!is_shadow_present_pte(*spte));
 780                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
 781                need_flush = 1;
 782                if (pte_write(*ptep)) {
 783                        rmap_remove(kvm, spte);
 784                        __set_spte(spte, shadow_trap_nonpresent_pte);
 785                        spte = rmap_next(kvm, rmapp, NULL);
 786                } else {
 787                        new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
 788                        new_spte |= (u64)new_pfn << PAGE_SHIFT;
 789
 790                        new_spte &= ~PT_WRITABLE_MASK;
 791                        new_spte &= ~SPTE_HOST_WRITEABLE;
 792                        if (is_writeble_pte(*spte))
 793                                kvm_set_pfn_dirty(spte_to_pfn(*spte));
 794                        __set_spte(spte, new_spte);
 795                        spte = rmap_next(kvm, rmapp, spte);
 796                }
 797        }
 798        if (need_flush)
 799                kvm_flush_remote_tlbs(kvm);
 800
 801        return 0;
 802}
 803
 804static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 805                          unsigned long data,
 806                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
 807                                         unsigned long data))
 808{
 809        int i, j;
 810        int retval = 0;
 811
 812        /*
  813          * If mmap_sem isn't taken, we can look at the memslots with only
 814         * the mmu_lock by skipping over the slots with userspace_addr == 0.
 815         */
 816        for (i = 0; i < kvm->nmemslots; i++) {
 817                struct kvm_memory_slot *memslot = &kvm->memslots[i];
 818                unsigned long start = memslot->userspace_addr;
 819                unsigned long end;
 820
 821                /* mmu_lock protects userspace_addr */
 822                if (!start)
 823                        continue;
 824
 825                end = start + (memslot->npages << PAGE_SHIFT);
 826                if (hva >= start && hva < end) {
 827                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
 828
 829                        retval |= handler(kvm, &memslot->rmap[gfn_offset],
 830                                          data);
 831
 832                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
 833                                int idx = gfn_offset;
 834                                idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
 835                                retval |= handler(kvm,
 836                                        &memslot->lpage_info[j][idx].rmap_pde,
 837                                        data);
 838                        }
 839                }
 840        }
 841
 842        return retval;
 843}
 844
 845int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 846{
 847        return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
 848}
 849
 850void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 851{
 852        kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 853}
 854
 855static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 856                         unsigned long data)
 857{
 858        u64 *spte;
 859        int young = 0;
 860
 861        /* always return old for EPT */
 862        if (!shadow_accessed_mask)
 863                return 0;
 864
 865        spte = rmap_next(kvm, rmapp, NULL);
 866        while (spte) {
 867                int _young;
 868                u64 _spte = *spte;
 869                BUG_ON(!(_spte & PT_PRESENT_MASK));
 870                _young = _spte & PT_ACCESSED_MASK;
 871                if (_young) {
 872                        young = 1;
 873                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
 874                }
 875                spte = rmap_next(kvm, rmapp, spte);
 876        }
 877        return young;
 878}
 879
 880#define RMAP_RECYCLE_THRESHOLD 1000
 881
 882static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 883{
 884        unsigned long *rmapp;
 885        struct kvm_mmu_page *sp;
 886
 887        sp = page_header(__pa(spte));
 888
 889        gfn = unalias_gfn(vcpu->kvm, gfn);
 890        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 891
 892        kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
 893        kvm_flush_remote_tlbs(vcpu->kvm);
 894}
 895
 896int kvm_age_hva(struct kvm *kvm, unsigned long hva)
 897{
 898        return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
 899}
 900
 901#ifdef MMU_DEBUG
 902static int is_empty_shadow_page(u64 *spt)
 903{
 904        u64 *pos;
 905        u64 *end;
 906
 907        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 908                if (is_shadow_present_pte(*pos)) {
 909                        printk(KERN_ERR "%s: %p %llx\n", __func__,
 910                               pos, *pos);
 911                        return 0;
 912                }
 913        return 1;
 914}
 915#endif
 916
 917static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 918{
 919        ASSERT(is_empty_shadow_page(sp->spt));
 920        list_del(&sp->link);
 921        __free_page(virt_to_page(sp->spt));
 922        __free_page(virt_to_page(sp->gfns));
 923        kfree(sp);
 924        ++kvm->arch.n_free_mmu_pages;
 925}
 926
 927static unsigned kvm_page_table_hashfn(gfn_t gfn)
 928{
 929        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
 930}
 931
 932static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 933                                               u64 *parent_pte)
 934{
 935        struct kvm_mmu_page *sp;
 936
 937        sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 938        sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 939        sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 940        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 941        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 942        INIT_LIST_HEAD(&sp->oos_link);
 943        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 944        sp->multimapped = 0;
 945        sp->parent_pte = parent_pte;
 946        --vcpu->kvm->arch.n_free_mmu_pages;
 947        return sp;
 948}
 949
 950static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 951                                    struct kvm_mmu_page *sp, u64 *parent_pte)
 952{
 953        struct kvm_pte_chain *pte_chain;
 954        struct hlist_node *node;
 955        int i;
 956
 957        if (!parent_pte)
 958                return;
 959        if (!sp->multimapped) {
 960                u64 *old = sp->parent_pte;
 961
 962                if (!old) {
 963                        sp->parent_pte = parent_pte;
 964                        return;
 965                }
 966                sp->multimapped = 1;
 967                pte_chain = mmu_alloc_pte_chain(vcpu);
 968                INIT_HLIST_HEAD(&sp->parent_ptes);
 969                hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 970                pte_chain->parent_ptes[0] = old;
 971        }
 972        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
 973                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
 974                        continue;
 975                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
 976                        if (!pte_chain->parent_ptes[i]) {
 977                                pte_chain->parent_ptes[i] = parent_pte;
 978                                return;
 979                        }
 980        }
 981        pte_chain = mmu_alloc_pte_chain(vcpu);
 982        BUG_ON(!pte_chain);
 983        hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 984        pte_chain->parent_ptes[0] = parent_pte;
 985}
 986
 987static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 988                                       u64 *parent_pte)
 989{
 990        struct kvm_pte_chain *pte_chain;
 991        struct hlist_node *node;
 992        int i;
 993
 994        if (!sp->multimapped) {
 995                BUG_ON(sp->parent_pte != parent_pte);
 996                sp->parent_pte = NULL;
 997                return;
 998        }
 999        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1000                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1001                        if (!pte_chain->parent_ptes[i])
1002                                break;
1003                        if (pte_chain->parent_ptes[i] != parent_pte)
1004                                continue;
1005                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
1006                                && pte_chain->parent_ptes[i + 1]) {
1007                                pte_chain->parent_ptes[i]
1008                                        = pte_chain->parent_ptes[i + 1];
1009                                ++i;
1010                        }
1011                        pte_chain->parent_ptes[i] = NULL;
1012                        if (i == 0) {
1013                                hlist_del(&pte_chain->link);
1014                                mmu_free_pte_chain(pte_chain);
1015                                if (hlist_empty(&sp->parent_ptes)) {
1016                                        sp->multimapped = 0;
1017                                        sp->parent_pte = NULL;
1018                                }
1019                        }
1020                        return;
1021                }
1022        BUG();
1023}
1024
1025
1026static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1027                            mmu_parent_walk_fn fn)
1028{
1029        struct kvm_pte_chain *pte_chain;
1030        struct hlist_node *node;
1031        struct kvm_mmu_page *parent_sp;
1032        int i;
1033
1034        if (!sp->multimapped && sp->parent_pte) {
1035                parent_sp = page_header(__pa(sp->parent_pte));
1036                fn(vcpu, parent_sp);
1037                mmu_parent_walk(vcpu, parent_sp, fn);
1038                return;
1039        }
1040        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1041                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1042                        if (!pte_chain->parent_ptes[i])
1043                                break;
1044                        parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
1045                        fn(vcpu, parent_sp);
1046                        mmu_parent_walk(vcpu, parent_sp, fn);
1047                }
1048}
1049
1050static void kvm_mmu_update_unsync_bitmap(u64 *spte)
1051{
1052        unsigned int index;
1053        struct kvm_mmu_page *sp = page_header(__pa(spte));
1054
1055        index = spte - sp->spt;
1056        if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
1057                sp->unsync_children++;
1058        WARN_ON(!sp->unsync_children);
1059}
1060
1061static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
1062{
1063        struct kvm_pte_chain *pte_chain;
1064        struct hlist_node *node;
1065        int i;
1066
1067        if (!sp->parent_pte)
1068                return;
1069
1070        if (!sp->multimapped) {
1071                kvm_mmu_update_unsync_bitmap(sp->parent_pte);
1072                return;
1073        }
1074
1075        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1076                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1077                        if (!pte_chain->parent_ptes[i])
1078                                break;
1079                        kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
1080                }
1081}
1082
1083static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1084{
1085        kvm_mmu_update_parents_unsync(sp);
1086        return 1;
1087}
1088
1089static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
1090                                        struct kvm_mmu_page *sp)
1091{
1092        mmu_parent_walk(vcpu, sp, unsync_walk_fn);
1093        kvm_mmu_update_parents_unsync(sp);
1094}
1095
1096static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1097                                    struct kvm_mmu_page *sp)
1098{
1099        int i;
1100
1101        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1102                sp->spt[i] = shadow_trap_nonpresent_pte;
1103}
1104
1105static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1106                               struct kvm_mmu_page *sp)
1107{
1108        return 1;
1109}
1110
1111static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1112{
1113}
1114
1115#define KVM_PAGE_ARRAY_NR 16
1116
1117struct kvm_mmu_pages {
1118        struct mmu_page_and_offset {
1119                struct kvm_mmu_page *sp;
1120                unsigned int idx;
1121        } page[KVM_PAGE_ARRAY_NR];
1122        unsigned int nr;
1123};
1124
1125#define for_each_unsync_children(bitmap, idx)           \
1126        for (idx = find_first_bit(bitmap, 512);         \
1127             idx < 512;                                 \
1128             idx = find_next_bit(bitmap, 512, idx+1))
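     /*
      * 512 is the number of sptes in one shadow page (PT64_ENT_PER_PAGE), so
      * unsync_child_bitmap carries one bit per entry of sp->spt.
      */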
1129
1130static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1131                         int idx)
1132{
1133        int i;
1134
1135        if (sp->unsync)
1136                for (i=0; i < pvec->nr; i++)
1137                        if (pvec->page[i].sp == sp)
1138                                return 0;
1139
1140        pvec->page[pvec->nr].sp = sp;
1141        pvec->page[pvec->nr].idx = idx;
1142        pvec->nr++;
1143        return (pvec->nr == KVM_PAGE_ARRAY_NR);
1144}
1145
1146static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1147                           struct kvm_mmu_pages *pvec)
1148{
1149        int i, ret, nr_unsync_leaf = 0;
1150
1151        for_each_unsync_children(sp->unsync_child_bitmap, i) {
1152                u64 ent = sp->spt[i];
1153
1154                if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1155                        struct kvm_mmu_page *child;
1156                        child = page_header(ent & PT64_BASE_ADDR_MASK);
1157
1158                        if (child->unsync_children) {
1159                                if (mmu_pages_add(pvec, child, i))
1160                                        return -ENOSPC;
1161
1162                                ret = __mmu_unsync_walk(child, pvec);
1163                                if (!ret)
1164                                        __clear_bit(i, sp->unsync_child_bitmap);
1165                                else if (ret > 0)
1166                                        nr_unsync_leaf += ret;
1167                                else
1168                                        return ret;
1169                        }
1170
1171                        if (child->unsync) {
1172                                nr_unsync_leaf++;
1173                                if (mmu_pages_add(pvec, child, i))
1174                                        return -ENOSPC;
1175                        }
1176                }
1177        }
1178
1179        if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1180                sp->unsync_children = 0;
1181
1182        return nr_unsync_leaf;
1183}
1184
1185static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1186                           struct kvm_mmu_pages *pvec)
1187{
1188        if (!sp->unsync_children)
1189                return 0;
1190
1191        mmu_pages_add(pvec, sp, 0);
1192        return __mmu_unsync_walk(sp, pvec);
1193}
1194
1195static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1196{
1197        unsigned index;
1198        struct hlist_head *bucket;
1199        struct kvm_mmu_page *sp;
1200        struct hlist_node *node;
1201
1202        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1203        index = kvm_page_table_hashfn(gfn);
1204        bucket = &kvm->arch.mmu_page_hash[index];
1205        hlist_for_each_entry(sp, node, bucket, hash_link)
1206                if (sp->gfn == gfn && !sp->role.direct
1207                    && !sp->role.invalid) {
1208                        pgprintk("%s: found role %x\n",
1209                                 __func__, sp->role.word);
1210                        return sp;
1211                }
1212        return NULL;
1213}
1214
1215static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1216{
1217        WARN_ON(!sp->unsync);
1218        sp->unsync = 0;
1219        --kvm->stat.mmu_unsync;
1220}
1221
1222static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1223
1224static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1225{
1226        if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1227                kvm_mmu_zap_page(vcpu->kvm, sp);
1228                return 1;
1229        }
1230
1231        trace_kvm_mmu_sync_page(sp);
1232        if (rmap_write_protect(vcpu->kvm, sp->gfn))
1233                kvm_flush_remote_tlbs(vcpu->kvm);
1234        kvm_unlink_unsync_page(vcpu->kvm, sp);
1235        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1236                kvm_mmu_zap_page(vcpu->kvm, sp);
1237                return 1;
1238        }
1239
1240        kvm_mmu_flush_tlb(vcpu);
1241        return 0;
1242}
1243
1244struct mmu_page_path {
1245        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1246        unsigned int idx[PT64_ROOT_LEVEL-1];
1247};
1248
1249#define for_each_sp(pvec, sp, parents, i)                       \
1250                for (i = mmu_pages_next(&pvec, &parents, -1),   \
1251                        sp = pvec.page[i].sp;                   \
1252                        i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1253                        i = mmu_pages_next(&pvec, &parents, i))
1254
1255static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1256                          struct mmu_page_path *parents,
1257                          int i)
1258{
1259        int n;
1260
1261        for (n = i+1; n < pvec->nr; n++) {
1262                struct kvm_mmu_page *sp = pvec->page[n].sp;
1263
1264                if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1265                        parents->idx[0] = pvec->page[n].idx;
1266                        return n;
1267                }
1268
1269                parents->parent[sp->role.level-2] = sp;
1270                parents->idx[sp->role.level-1] = pvec->page[n].idx;
1271        }
1272
1273        return n;
1274}
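     /*
      * Together, mmu_pages_next() and for_each_sp() visit only the
      * PT_PAGE_TABLE_LEVEL pages collected in pvec, recording each page's
      * ancestors and child indices in the mmu_page_path so that
      * mmu_pages_clear_parents() can drop the matching unsync_child bits.
      */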
1275
1276static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1277{
1278        struct kvm_mmu_page *sp;
1279        unsigned int level = 0;
1280
1281        do {
1282                unsigned int idx = parents->idx[level];
1283
1284                sp = parents->parent[level];
1285                if (!sp)
1286                        return;
1287
1288                --sp->unsync_children;
1289                WARN_ON((int)sp->unsync_children < 0);
1290                __clear_bit(idx, sp->unsync_child_bitmap);
1291                level++;
1292        } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1293}
1294
1295static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1296                               struct mmu_page_path *parents,
1297                               struct kvm_mmu_pages *pvec)
1298{
1299        parents->parent[parent->role.level-1] = NULL;
1300        pvec->nr = 0;
1301}
1302
1303static void mmu_sync_children(struct kvm_vcpu *vcpu,
1304                              struct kvm_mmu_page *parent)
1305{
1306        int i;
1307        struct kvm_mmu_page *sp;
1308        struct mmu_page_path parents;
1309        struct kvm_mmu_pages pages;
1310
1311        kvm_mmu_pages_init(parent, &parents, &pages);
1312        while (mmu_unsync_walk(parent, &pages)) {
1313                int protected = 0;
1314
1315                for_each_sp(pages, sp, parents, i)
1316                        protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1317
1318                if (protected)
1319                        kvm_flush_remote_tlbs(vcpu->kvm);
1320
1321                for_each_sp(pages, sp, parents, i) {
1322                        kvm_sync_page(vcpu, sp);
1323                        mmu_pages_clear_parents(&parents);
1324                }
1325                cond_resched_lock(&vcpu->kvm->mmu_lock);
1326                kvm_mmu_pages_init(parent, &parents, &pages);
1327        }
1328}
1329
1330static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1331                                             gfn_t gfn,
1332                                             gva_t gaddr,
1333                                             unsigned level,
1334                                             int direct,
1335                                             unsigned access,
1336                                             u64 *parent_pte)
1337{
1338        union kvm_mmu_page_role role;
1339        unsigned index;
1340        unsigned quadrant;
1341        struct hlist_head *bucket;
1342        struct kvm_mmu_page *sp;
1343        struct hlist_node *node, *tmp;
1344
1345        role = vcpu->arch.mmu.base_role;
1346        role.level = level;
1347        role.direct = direct;
1348        role.access = access;
1349        if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1350                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1351                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1352                role.quadrant = quadrant;
1353        }
1354        index = kvm_page_table_hashfn(gfn);
1355        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1356        hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1357                if (sp->gfn == gfn) {
1358                        if (sp->unsync)
1359                                if (kvm_sync_page(vcpu, sp))
1360                                        continue;
1361
1362                        if (sp->role.word != role.word)
1363                                continue;
1364
1365                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1366                        if (sp->unsync_children) {
1367                                set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1368                                kvm_mmu_mark_parents_unsync(vcpu, sp);
1369                        }
1370                        trace_kvm_mmu_get_page(sp, false);
1371                        return sp;
1372                }
1373        ++vcpu->kvm->stat.mmu_cache_miss;
1374        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1375        if (!sp)
1376                return sp;
1377        sp->gfn = gfn;
1378        sp->role = role;
1379        hlist_add_head(&sp->hash_link, bucket);
1380        if (!direct) {
1381                if (rmap_write_protect(vcpu->kvm, gfn))
1382                        kvm_flush_remote_tlbs(vcpu->kvm);
1383                account_shadowed(vcpu->kvm, gfn);
1384        }
1385        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1386                vcpu->arch.mmu.prefetch_page(vcpu, sp);
1387        else
1388                nonpaging_prefetch_page(vcpu, sp);
1389        trace_kvm_mmu_get_page(sp, true);
1390        return sp;
1391}
1392
1393static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1394                             struct kvm_vcpu *vcpu, u64 addr)
1395{
1396        iterator->addr = addr;
1397        iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1398        iterator->level = vcpu->arch.mmu.shadow_root_level;
1399        if (iterator->level == PT32E_ROOT_LEVEL) {
1400                iterator->shadow_addr
1401                        = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1402                iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1403                --iterator->level;
1404                if (!iterator->shadow_addr)
1405                        iterator->level = 0;
1406        }
1407}
1408
1409static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1410{
1411        if (iterator->level < PT_PAGE_TABLE_LEVEL)
1412                return false;
1413
1414        if (iterator->level == PT_PAGE_TABLE_LEVEL)
1415                if (is_large_pte(*iterator->sptep))
1416                        return false;
1417
1418        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1419        iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1420        return true;
1421}
1422
1423static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1424{
1425        iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1426        --iterator->level;
1427}
1428
1429static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1430                                         struct kvm_mmu_page *sp)
1431{
1432        unsigned i;
1433        u64 *pt;
1434        u64 ent;
1435
1436        pt = sp->spt;
1437
1438        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1439                ent = pt[i];
1440
1441                if (is_shadow_present_pte(ent)) {
1442                        if (!is_last_spte(ent, sp->role.level)) {
1443                                ent &= PT64_BASE_ADDR_MASK;
1444                                mmu_page_remove_parent_pte(page_header(ent),
1445                                                           &pt[i]);
1446                        } else {
1447                                if (is_large_pte(ent))
1448                                        --kvm->stat.lpages;
1449                                rmap_remove(kvm, &pt[i]);
1450                        }
1451                }
1452                pt[i] = shadow_trap_nonpresent_pte;
1453        }
1454}
1455
1456static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1457{
1458        mmu_page_remove_parent_pte(sp, parent_pte);
1459}
1460
1461static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1462{
1463        int i;
1464        struct kvm_vcpu *vcpu;
1465
1466        kvm_for_each_vcpu(i, vcpu, kvm)
1467                vcpu->arch.last_pte_updated = NULL;
1468}
1469
1470static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1471{
1472        u64 *parent_pte;
1473
1474        while (sp->multimapped || sp->parent_pte) {
1475                if (!sp->multimapped)
1476                        parent_pte = sp->parent_pte;
1477                else {
1478                        struct kvm_pte_chain *chain;
1479
1480                        chain = container_of(sp->parent_ptes.first,
1481                                             struct kvm_pte_chain, link);
1482                        parent_pte = chain->parent_ptes[0];
1483                }
1484                BUG_ON(!parent_pte);
1485                kvm_mmu_put_page(sp, parent_pte);
1486                __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1487        }
1488}
1489
1490static int mmu_zap_unsync_children(struct kvm *kvm,
1491                                   struct kvm_mmu_page *parent)
1492{
1493        int i, zapped = 0;
1494        struct mmu_page_path parents;
1495        struct kvm_mmu_pages pages;
1496
1497        if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1498                return 0;
1499
1500        kvm_mmu_pages_init(parent, &parents, &pages);
1501        while (mmu_unsync_walk(parent, &pages)) {
1502                struct kvm_mmu_page *sp;
1503
1504                for_each_sp(pages, sp, parents, i) {
1505                        kvm_mmu_zap_page(kvm, sp);
1506                        mmu_pages_clear_parents(&parents);
1507                }
1508                zapped += pages.nr;
1509                kvm_mmu_pages_init(parent, &parents, &pages);
1510        }
1511
1512        return zapped;
1513}
1514
1515static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1516{
1517        int ret;
1518
1519        trace_kvm_mmu_zap_page(sp);
1520        ++kvm->stat.mmu_shadow_zapped;
1521        ret = mmu_zap_unsync_children(kvm, sp);
1522        kvm_mmu_page_unlink_children(kvm, sp);
1523        kvm_mmu_unlink_parents(kvm, sp);
1524        kvm_flush_remote_tlbs(kvm);
1525        if (!sp->role.invalid && !sp->role.direct)
1526                unaccount_shadowed(kvm, sp->gfn);
1527        if (sp->unsync)
1528                kvm_unlink_unsync_page(kvm, sp);
1529        if (!sp->root_count) {
1530                hlist_del(&sp->hash_link);
1531                kvm_mmu_free_page(kvm, sp);
1532        } else {
1533                sp->role.invalid = 1;
1534                list_move(&sp->link, &kvm->arch.active_mmu_pages);
1535                kvm_reload_remote_mmus(kvm);
1536        }
1537        kvm_mmu_reset_last_pte_updated(kvm);
1538        return ret;
1539}
1540
1541/*
 1542 * Changing the number of mmu pages allocated to the vm.
 1543 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
1544 */
1545void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1546{
1547        int used_pages;
1548
1549        used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1550        used_pages = max(0, used_pages);
1551
1552        /*
 1553         * If we set the number of mmu pages to be smaller than the
 1554         * number of active pages, we must free some mmu pages before we
 1555         * change the value.
1556         */
1557
1558        if (used_pages > kvm_nr_mmu_pages) {
1559                while (used_pages > kvm_nr_mmu_pages) {
1560                        struct kvm_mmu_page *page;
1561
1562                        page = container_of(kvm->arch.active_mmu_pages.prev,
1563                                            struct kvm_mmu_page, link);
1564                        kvm_mmu_zap_page(kvm, page);
1565                        used_pages--;
1566                }
1567                kvm->arch.n_free_mmu_pages = 0;
1568        }
1569        else
1570                kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1571                                         - kvm->arch.n_alloc_mmu_pages;
1572
1573        kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1574}
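/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Example of the accounting above, assuming n_alloc_mmu_pages == 512
 * and n_free_mmu_pages == 112, i.e. used_pages == 400:
 *
 *   kvm_mmu_change_mmu_pages(kvm, 256);
 *      zaps pages from the tail of active_mmu_pages until used_pages
 *      reaches 256, then sets n_free_mmu_pages to 0
 *
 *   kvm_mmu_change_mmu_pages(kvm, 1024);
 *      zaps nothing; n_free_mmu_pages grows by 1024 - 512 = 512
 *
 * In both cases n_alloc_mmu_pages ends up equal to the new limit.
 */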
1575
1576static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1577{
1578        unsigned index;
1579        struct hlist_head *bucket;
1580        struct kvm_mmu_page *sp;
1581        struct hlist_node *node, *n;
1582        int r;
1583
1584        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1585        r = 0;
1586        index = kvm_page_table_hashfn(gfn);
1587        bucket = &kvm->arch.mmu_page_hash[index];
1588        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1589                if (sp->gfn == gfn && !sp->role.direct) {
1590                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1591                                 sp->role.word);
1592                        r = 1;
1593                        if (kvm_mmu_zap_page(kvm, sp))
1594                                n = bucket->first;
1595                }
1596        return r;
1597}
1598
1599static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1600{
1601        unsigned index;
1602        struct hlist_head *bucket;
1603        struct kvm_mmu_page *sp;
1604        struct hlist_node *node, *nn;
1605
1606        index = kvm_page_table_hashfn(gfn);
1607        bucket = &kvm->arch.mmu_page_hash[index];
1608        hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1609                if (sp->gfn == gfn && !sp->role.direct
1610                    && !sp->role.invalid) {
1611                        pgprintk("%s: zap %lx %x\n",
1612                                 __func__, gfn, sp->role.word);
1613                        kvm_mmu_zap_page(kvm, sp);
1614                }
1615        }
1616}
1617
1618static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1619{
1620        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1621        struct kvm_mmu_page *sp = page_header(__pa(pte));
1622
1623        __set_bit(slot, sp->slot_bitmap);
1624}
1625
1626static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1627{
1628        int i;
1629        u64 *pt = sp->spt;
1630
1631        if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1632                return;
1633
1634        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1635                if (pt[i] == shadow_notrap_nonpresent_pte)
1636                        __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1637        }
1638}
1639
1640struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1641{
1642        struct page *page;
1643
1644        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1645
1646        if (gpa == UNMAPPED_GVA)
1647                return NULL;
1648
1649        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1650
1651        return page;
1652}
1653
1654/*
1655 * The function is based on mtrr_type_lookup() in
1656 * arch/x86/kernel/cpu/mtrr/generic.c
1657 */
1658static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1659                         u64 start, u64 end)
1660{
1661        int i;
1662        u64 base, mask;
1663        u8 prev_match, curr_match;
1664        int num_var_ranges = KVM_NR_VAR_MTRR;
1665
1666        if (!mtrr_state->enabled)
1667                return 0xFF;
1668
1669        /* Make end inclusive, instead of exclusive */
1670        end--;
1671
1672        /* Look in fixed ranges. Just return the type as per start */
1673        if (mtrr_state->have_fixed && (start < 0x100000)) {
1674                int idx;
1675
1676                if (start < 0x80000) {
1677                        idx = 0;
1678                        idx += (start >> 16);
1679                        return mtrr_state->fixed_ranges[idx];
1680                } else if (start < 0xC0000) {
1681                        idx = 1 * 8;
1682                        idx += ((start - 0x80000) >> 14);
1683                        return mtrr_state->fixed_ranges[idx];
1684                } else if (start < 0x1000000) {
1685                        idx = 3 * 8;
1686                        idx += ((start - 0xC0000) >> 12);
1687                        return mtrr_state->fixed_ranges[idx];
1688                }
1689        }
1690
1691        /*
1692         * Look in variable ranges
1693         * Look for multiple ranges matching this address and pick the type
1694         * as per MTRR precedence
1695         */
1696        if (!(mtrr_state->enabled & 2))
1697                return mtrr_state->def_type;
1698
1699        prev_match = 0xFF;
1700        for (i = 0; i < num_var_ranges; ++i) {
1701                unsigned short start_state, end_state;
1702
1703                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1704                        continue;
1705
1706                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1707                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1708                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1709                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1710
1711                start_state = ((start & mask) == (base & mask));
1712                end_state = ((end & mask) == (base & mask));
1713                if (start_state != end_state)
1714                        return 0xFE;
1715
1716                if ((start & mask) != (base & mask))
1717                        continue;
1718
1719                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1720                if (prev_match == 0xFF) {
1721                        prev_match = curr_match;
1722                        continue;
1723                }
1724
1725                if (prev_match == MTRR_TYPE_UNCACHABLE ||
1726                    curr_match == MTRR_TYPE_UNCACHABLE)
1727                        return MTRR_TYPE_UNCACHABLE;
1728
1729                if ((prev_match == MTRR_TYPE_WRBACK &&
1730                     curr_match == MTRR_TYPE_WRTHROUGH) ||
1731                    (prev_match == MTRR_TYPE_WRTHROUGH &&
1732                     curr_match == MTRR_TYPE_WRBACK)) {
1733                        prev_match = MTRR_TYPE_WRTHROUGH;
1734                        curr_match = MTRR_TYPE_WRTHROUGH;
1735                }
1736
1737                if (prev_match != curr_match)
1738                        return MTRR_TYPE_UNCACHABLE;
1739        }
1740
1741        if (prev_match != 0xFF)
1742                return prev_match;
1743
1744        return mtrr_state->def_type;
1745}
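/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The fixed-range index math above, worked for sample addresses:
 *
 *   start = 0x7c000 -> idx = 0x7c000 >> 16                    = 7
 *   start = 0xb8000 -> idx = 8  + ((0xb8000 - 0x80000) >> 14) = 22
 *   start = 0xc8000 -> idx = 24 + ((0xc8000 - 0xc0000) >> 12) = 32
 *
 * For the variable ranges, a range matches when
 * (addr & mask) == (base & mask); when two matching ranges disagree,
 * UC wins over everything and WT wins over WB, which is the MTRR
 * precedence the loop above implements.
 */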
1746
1747u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1748{
1749        u8 mtrr;
1750
1751        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1752                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
1753        if (mtrr == 0xfe || mtrr == 0xff)
1754                mtrr = MTRR_TYPE_WRBACK;
1755        return mtrr;
1756}
1757EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1758
1759static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1760{
1761        unsigned index;
1762        struct hlist_head *bucket;
1763        struct kvm_mmu_page *s;
1764        struct hlist_node *node, *n;
1765
1766        trace_kvm_mmu_unsync_page(sp);
1767        index = kvm_page_table_hashfn(sp->gfn);
1768        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1769        /* don't unsync if pagetable is shadowed with multiple roles */
1770        hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1771                if (s->gfn != sp->gfn || s->role.direct)
1772                        continue;
1773                if (s->role.word != sp->role.word)
1774                        return 1;
1775        }
1776        ++vcpu->kvm->stat.mmu_unsync;
1777        sp->unsync = 1;
1778
1779        kvm_mmu_mark_parents_unsync(vcpu, sp);
1780
1781        mmu_convert_notrap(sp);
1782        return 0;
1783}
1784
1785static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1786                                  bool can_unsync)
1787{
1788        struct kvm_mmu_page *shadow;
1789
1790        shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1791        if (shadow) {
1792                if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1793                        return 1;
1794                if (shadow->unsync)
1795                        return 0;
1796                if (can_unsync && oos_shadow)
1797                        return kvm_unsync_page(vcpu, shadow);
1798                return 1;
1799        }
1800        return 0;
1801}
1802
1803static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1804                    unsigned pte_access, int user_fault,
1805                    int write_fault, int dirty, int level,
1806                    gfn_t gfn, pfn_t pfn, bool speculative,
1807                    bool can_unsync, bool reset_host_protection)
1808{
1809        u64 spte;
1810        int ret = 0;
1811
1812        /*
1813         * We don't set the accessed bit, since we sometimes want to see
1814         * whether the guest actually used the pte (in order to detect
1815         * demand paging).
1816         */
1817        spte = shadow_base_present_pte | shadow_dirty_mask;
1818        if (!speculative)
1819                spte |= shadow_accessed_mask;
1820        if (!dirty)
1821                pte_access &= ~ACC_WRITE_MASK;
1822        if (pte_access & ACC_EXEC_MASK)
1823                spte |= shadow_x_mask;
1824        else
1825                spte |= shadow_nx_mask;
1826        if (pte_access & ACC_USER_MASK)
1827                spte |= shadow_user_mask;
1828        if (level > PT_PAGE_TABLE_LEVEL)
1829                spte |= PT_PAGE_SIZE_MASK;
1830        if (tdp_enabled)
1831                spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1832                        kvm_is_mmio_pfn(pfn));
1833
1834        if (reset_host_protection)
1835                spte |= SPTE_HOST_WRITEABLE;
1836
1837        spte |= (u64)pfn << PAGE_SHIFT;
1838
1839        if ((pte_access & ACC_WRITE_MASK)
1840            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1841
1842                if (level > PT_PAGE_TABLE_LEVEL &&
1843                    has_wrprotected_page(vcpu->kvm, gfn, level)) {
1844                        ret = 1;
1845                        spte = shadow_trap_nonpresent_pte;
1846                        goto set_pte;
1847                }
1848
1849                spte |= PT_WRITABLE_MASK;
1850
1851                /*
1852                 * Optimization: for pte sync, if spte was writable the hash
1853                 * lookup is unnecessary (and expensive). Write protection
1854                 * is the responsibility of mmu_get_page / kvm_sync_page.
1855                 * Same reasoning can be applied to dirty page accounting.
1856                 */
1857                if (!can_unsync && is_writeble_pte(*sptep))
1858                        goto set_pte;
1859
1860                if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1861                        pgprintk("%s: found shadow page for %lx, marking ro\n",
1862                                 __func__, gfn);
1863                        ret = 1;
1864                        pte_access &= ~ACC_WRITE_MASK;
1865                        if (is_writeble_pte(spte))
1866                                spte &= ~PT_WRITABLE_MASK;
1867                }
1868        }
1869
1870        if (pte_access & ACC_WRITE_MASK)
1871                mark_page_dirty(vcpu->kvm, gfn);
1872
1873set_pte:
1874        __set_spte(sptep, spte);
1875        return ret;
1876}
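/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Rough shape of a 4K, writable, user-accessible spte as built above
 * (exact mask values differ between EPT and shadow paging):
 *
 *   spte = shadow_base_present_pte | shadow_dirty_mask
 *        | shadow_accessed_mask                (non-speculative map)
 *        | shadow_user_mask | shadow_x_mask
 *        | PT_WRITABLE_MASK
 *        | ((u64)pfn << PAGE_SHIFT);
 *
 * A write-protected mapping is the same word with PT_WRITABLE_MASK
 * clear, which is what the mmu_need_write_protect() path arranges.
 */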
1877
1878static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1879                         unsigned pt_access, unsigned pte_access,
1880                         int user_fault, int write_fault, int dirty,
1881                         int *ptwrite, int level, gfn_t gfn,
1882                         pfn_t pfn, bool speculative,
1883                         bool reset_host_protection)
1884{
1885        int was_rmapped = 0;
1886        int was_writeble = is_writeble_pte(*sptep);
1887        int rmap_count;
1888
1889        pgprintk("%s: spte %llx access %x write_fault %d"
1890                 " user_fault %d gfn %lx\n",
1891                 __func__, *sptep, pt_access,
1892                 write_fault, user_fault, gfn);
1893
1894        if (is_rmap_spte(*sptep)) {
1895                /*
1896                 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1897                 * the parent of the now unreachable PTE.
1898                 */
1899                if (level > PT_PAGE_TABLE_LEVEL &&
1900                    !is_large_pte(*sptep)) {
1901                        struct kvm_mmu_page *child;
1902                        u64 pte = *sptep;
1903
1904                        child = page_header(pte & PT64_BASE_ADDR_MASK);
1905                        mmu_page_remove_parent_pte(child, sptep);
1906                } else if (pfn != spte_to_pfn(*sptep)) {
1907                        pgprintk("hfn old %lx new %lx\n",
1908                                 spte_to_pfn(*sptep), pfn);
1909                        rmap_remove(vcpu->kvm, sptep);
1910                } else
1911                        was_rmapped = 1;
1912        }
1913
1914        if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1915                      dirty, level, gfn, pfn, speculative, true,
1916                      reset_host_protection)) {
1917                if (write_fault)
1918                        *ptwrite = 1;
1919                kvm_x86_ops->tlb_flush(vcpu);
1920        }
1921
1922        pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1923        pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1924                 is_large_pte(*sptep) ? "2MB" : "4kB",
1925                 *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
1926                 *sptep, sptep);
1927        if (!was_rmapped && is_large_pte(*sptep))
1928                ++vcpu->kvm->stat.lpages;
1929
1930        page_header_update_slot(vcpu->kvm, sptep, gfn);
1931        if (!was_rmapped) {
1932                rmap_count = rmap_add(vcpu, sptep, gfn);
1933                kvm_release_pfn_clean(pfn);
1934                if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1935                        rmap_recycle(vcpu, sptep, gfn);
1936        } else {
1937                if (was_writeble)
1938                        kvm_release_pfn_dirty(pfn);
1939                else
1940                        kvm_release_pfn_clean(pfn);
1941        }
1942        if (speculative) {
1943                vcpu->arch.last_pte_updated = sptep;
1944                vcpu->arch.last_pte_gfn = gfn;
1945        }
1946}
1947
1948static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1949{
1950}
1951
1952static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1953                        int level, gfn_t gfn, pfn_t pfn)
1954{
1955        struct kvm_shadow_walk_iterator iterator;
1956        struct kvm_mmu_page *sp;
1957        int pt_write = 0;
1958        gfn_t pseudo_gfn;
1959
1960        for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1961                if (iterator.level == level) {
1962                        mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1963                                     0, write, 1, &pt_write,
1964                                     level, gfn, pfn, false, true);
1965                        ++vcpu->stat.pf_fixed;
1966                        break;
1967                }
1968
1969                if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1970                        pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1971                        sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1972                                              iterator.level - 1,
1973                                              1, ACC_ALL, iterator.sptep);
1974                        if (!sp) {
1975                                pgprintk("nonpaging_map: ENOMEM\n");
1976                                kvm_release_pfn_clean(pfn);
1977                                return -ENOMEM;
1978                        }
1979
1980                        __set_spte(iterator.sptep,
1981                                   __pa(sp->spt)
1982                                   | PT_PRESENT_MASK | PT_WRITABLE_MASK
1983                                   | shadow_user_mask | shadow_x_mask);
1984                }
1985        }
1986        return pt_write;
1987}
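/*
 * Editor's note: illustrative walk-through, not part of the original file.
 * For a 4-level TDP walk targeting a 4K mapping, __direct_map() visits
 * the entries for gfn at levels 4, 3, 2 and finally 1:
 *
 *   levels 4..2: a shadow_trap_nonpresent_pte entry gets a new direct
 *                shadow page linked in with PT_PRESENT_MASK,
 *                PT_WRITABLE_MASK and the user/exec bits
 *   level 1:     the leaf is installed via mmu_set_spte() and the walk
 *                stops
 *
 * A 2MB mapping is the same walk stopped at level 2, with the leaf
 * getting PT_PAGE_SIZE_MASK from set_spte().
 */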
1988
1989static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1990{
1991        int r;
1992        int level;
1993        pfn_t pfn;
1994        unsigned long mmu_seq;
1995
1996        level = mapping_level(vcpu, gfn);
1997
1998        /*
1999         * This path builds a PAE pagetable, so we can map 2MB pages at
2000         * most. Therefore check whether the level is larger than that.
2001         */
2002        if (level > PT_DIRECTORY_LEVEL)
2003                level = PT_DIRECTORY_LEVEL;
2004
2005        gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2006
2007        mmu_seq = vcpu->kvm->mmu_notifier_seq;
2008        smp_rmb();
2009        pfn = gfn_to_pfn(vcpu->kvm, gfn);
2010
2011        /* mmio */
2012        if (is_error_pfn(pfn)) {
2013                kvm_release_pfn_clean(pfn);
2014                return 1;
2015        }
2016
2017        spin_lock(&vcpu->kvm->mmu_lock);
2018        if (mmu_notifier_retry(vcpu, mmu_seq))
2019                goto out_unlock;
2020        kvm_mmu_free_some_pages(vcpu);
2021        r = __direct_map(vcpu, v, write, level, gfn, pfn);
2022        spin_unlock(&vcpu->kvm->mmu_lock);
2023
2024
2025        return r;
2026
2027out_unlock:
2028        spin_unlock(&vcpu->kvm->mmu_lock);
2029        kvm_release_pfn_clean(pfn);
2030        return 0;
2031}
2032
2033
2034static void mmu_free_roots(struct kvm_vcpu *vcpu)
2035{
2036        int i;
2037        struct kvm_mmu_page *sp;
2038
2039        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2040                return;
2041        spin_lock(&vcpu->kvm->mmu_lock);
2042        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2043                hpa_t root = vcpu->arch.mmu.root_hpa;
2044
2045                sp = page_header(root);
2046                --sp->root_count;
2047                if (!sp->root_count && sp->role.invalid)
2048                        kvm_mmu_zap_page(vcpu->kvm, sp);
2049                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2050                spin_unlock(&vcpu->kvm->mmu_lock);
2051                return;
2052        }
2053        for (i = 0; i < 4; ++i) {
2054                hpa_t root = vcpu->arch.mmu.pae_root[i];
2055
2056                if (root) {
2057                        root &= PT64_BASE_ADDR_MASK;
2058                        sp = page_header(root);
2059                        --sp->root_count;
2060                        if (!sp->root_count && sp->role.invalid)
2061                                kvm_mmu_zap_page(vcpu->kvm, sp);
2062                }
2063                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2064        }
2065        spin_unlock(&vcpu->kvm->mmu_lock);
2066        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2067}
2068
2069static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2070{
2071        int ret = 0;
2072
2073        if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2074                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2075                ret = 1;
2076        }
2077
2078        return ret;
2079}
2080
2081static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2082{
2083        int i;
2084        gfn_t root_gfn;
2085        struct kvm_mmu_page *sp;
2086        int direct = 0;
2087        u64 pdptr;
2088
2089        root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
2090
2091        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2092                hpa_t root = vcpu->arch.mmu.root_hpa;
2093
2094                ASSERT(!VALID_PAGE(root));
2095                if (tdp_enabled)
2096                        direct = 1;
2097                if (mmu_check_root(vcpu, root_gfn))
2098                        return 1;
2099                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
2100                                      PT64_ROOT_LEVEL, direct,
2101                                      ACC_ALL, NULL);
2102                root = __pa(sp->spt);
2103                ++sp->root_count;
2104                vcpu->arch.mmu.root_hpa = root;
2105                return 0;
2106        }
2107        direct = !is_paging(vcpu);
2108        if (tdp_enabled)
2109                direct = 1;
2110        for (i = 0; i < 4; ++i) {
2111                hpa_t root = vcpu->arch.mmu.pae_root[i];
2112
2113                ASSERT(!VALID_PAGE(root));
2114                if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2115                        pdptr = kvm_pdptr_read(vcpu, i);
2116                        if (!is_present_gpte(pdptr)) {
2117                                vcpu->arch.mmu.pae_root[i] = 0;
2118                                continue;
2119                        }
2120                        root_gfn = pdptr >> PAGE_SHIFT;
2121                } else if (vcpu->arch.mmu.root_level == 0)
2122                        root_gfn = 0;
2123                if (mmu_check_root(vcpu, root_gfn))
2124                        return 1;
2125                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2126                                      PT32_ROOT_LEVEL, direct,
2127                                      ACC_ALL, NULL);
2128                root = __pa(sp->spt);
2129                ++sp->root_count;
2130                vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2131        }
2132        vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2133        return 0;
2134}
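/*
 * Editor's note: illustrative summary, not part of the original file.
 * For a PAE guest, each of the four pae_root[i] slots shadows one guest
 * PDPTE: the address i << 30 passed to kvm_mmu_get_page() records which
 * 1GB quadrant the shadow page covers, and a not-present guest PDPTE
 * simply leaves pae_root[i] == 0.  root_hpa then points at the pae_root
 * table itself rather than at a single shadow page.
 */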
2135
2136static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2137{
2138        int i;
2139        struct kvm_mmu_page *sp;
2140
2141        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2142                return;
2143        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2144                hpa_t root = vcpu->arch.mmu.root_hpa;
2145                sp = page_header(root);
2146                mmu_sync_children(vcpu, sp);
2147                return;
2148        }
2149        for (i = 0; i < 4; ++i) {
2150                hpa_t root = vcpu->arch.mmu.pae_root[i];
2151
2152                if (root && VALID_PAGE(root)) {
2153                        root &= PT64_BASE_ADDR_MASK;
2154                        sp = page_header(root);
2155                        mmu_sync_children(vcpu, sp);
2156                }
2157        }
2158}
2159
2160void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2161{
2162        spin_lock(&vcpu->kvm->mmu_lock);
2163        mmu_sync_roots(vcpu);
2164        spin_unlock(&vcpu->kvm->mmu_lock);
2165}
2166
2167static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2168{
2169        return vaddr;
2170}
2171
2172static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2173                                u32 error_code)
2174{
2175        gfn_t gfn;
2176        int r;
2177
2178        pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2179        r = mmu_topup_memory_caches(vcpu);
2180        if (r)
2181                return r;
2182
2183        ASSERT(vcpu);
2184        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2185
2186        gfn = gva >> PAGE_SHIFT;
2187
2188        return nonpaging_map(vcpu, gva & PAGE_MASK,
2189                             error_code & PFERR_WRITE_MASK, gfn);
2190}
2191
2192static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2193                                u32 error_code)
2194{
2195        pfn_t pfn;
2196        int r;
2197        int level;
2198        gfn_t gfn = gpa >> PAGE_SHIFT;
2199        unsigned long mmu_seq;
2200
2201        ASSERT(vcpu);
2202        ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2203
2204        r = mmu_topup_memory_caches(vcpu);
2205        if (r)
2206                return r;
2207
2208        level = mapping_level(vcpu, gfn);
2209
2210        gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2211
2212        mmu_seq = vcpu->kvm->mmu_notifier_seq;
2213        smp_rmb();
2214        pfn = gfn_to_pfn(vcpu->kvm, gfn);
2215        if (is_error_pfn(pfn)) {
2216                kvm_release_pfn_clean(pfn);
2217                return 1;
2218        }
2219        spin_lock(&vcpu->kvm->mmu_lock);
2220        if (mmu_notifier_retry(vcpu, mmu_seq))
2221                goto out_unlock;
2222        kvm_mmu_free_some_pages(vcpu);
2223        r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2224                         level, gfn, pfn);
2225        spin_unlock(&vcpu->kvm->mmu_lock);
2226
2227        return r;
2228
2229out_unlock:
2230        spin_unlock(&vcpu->kvm->mmu_lock);
2231        kvm_release_pfn_clean(pfn);
2232        return 0;
2233}
2234
2235static void nonpaging_free(struct kvm_vcpu *vcpu)
2236{
2237        mmu_free_roots(vcpu);
2238}
2239
2240static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2241{
2242        struct kvm_mmu *context = &vcpu->arch.mmu;
2243
2244        context->new_cr3 = nonpaging_new_cr3;
2245        context->page_fault = nonpaging_page_fault;
2246        context->gva_to_gpa = nonpaging_gva_to_gpa;
2247        context->free = nonpaging_free;
2248        context->prefetch_page = nonpaging_prefetch_page;
2249        context->sync_page = nonpaging_sync_page;
2250        context->invlpg = nonpaging_invlpg;
2251        context->root_level = 0;
2252        context->shadow_root_level = PT32E_ROOT_LEVEL;
2253        context->root_hpa = INVALID_PAGE;
2254        return 0;
2255}
2256
2257void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2258{
2259        ++vcpu->stat.tlb_flush;
2260        kvm_x86_ops->tlb_flush(vcpu);
2261}
2262
2263static void paging_new_cr3(struct kvm_vcpu *vcpu)
2264{
2265        pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2266        mmu_free_roots(vcpu);
2267}
2268
2269static void inject_page_fault(struct kvm_vcpu *vcpu,
2270                              u64 addr,
2271                              u32 err_code)
2272{
2273        kvm_inject_page_fault(vcpu, addr, err_code);
2274}
2275
2276static void paging_free(struct kvm_vcpu *vcpu)
2277{
2278        nonpaging_free(vcpu);
2279}
2280
2281static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2282{
2283        int bit7;
2284
2285        bit7 = (gpte >> 7) & 1;
2286        return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2287}
2288
2289#define PTTYPE 64
2290#include "paging_tmpl.h"
2291#undef PTTYPE
2292
2293#define PTTYPE 32
2294#include "paging_tmpl.h"
2295#undef PTTYPE
2296
2297static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2298{
2299        struct kvm_mmu *context = &vcpu->arch.mmu;
2300        int maxphyaddr = cpuid_maxphyaddr(vcpu);
2301        u64 exb_bit_rsvd = 0;
2302
2303        if (!is_nx(vcpu))
2304                exb_bit_rsvd = rsvd_bits(63, 63);
2305        switch (level) {
2306        case PT32_ROOT_LEVEL:
2307                /* no rsvd bits for 2 level 4K page table entries */
2308                context->rsvd_bits_mask[0][1] = 0;
2309                context->rsvd_bits_mask[0][0] = 0;
2310                if (is_cpuid_PSE36())
2311                        /* 36-bit PSE 4MB page */
2312                        context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2313                else
2314                        /* 32-bit PSE 4MB page */
2315                        context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2316                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2317                break;
2318        case PT32E_ROOT_LEVEL:
2319                context->rsvd_bits_mask[0][2] =
2320                        rsvd_bits(maxphyaddr, 63) |
2321                        rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2322                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2323                        rsvd_bits(maxphyaddr, 62);      /* PDE */
2324                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2325                        rsvd_bits(maxphyaddr, 62);      /* PTE */
2326                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2327                        rsvd_bits(maxphyaddr, 62) |
2328                        rsvd_bits(13, 20);              /* large page */
2329                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2330                break;
2331        case PT64_ROOT_LEVEL:
2332                context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2333                        rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2334                context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2335                        rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2336                context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2337                        rsvd_bits(maxphyaddr, 51);
2338                context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2339                        rsvd_bits(maxphyaddr, 51);
2340                context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2341                context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2342                        rsvd_bits(maxphyaddr, 51) |
2343                        rsvd_bits(13, 29);
2344                context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2345                        rsvd_bits(maxphyaddr, 51) |
2346                        rsvd_bits(13, 20);              /* large page */
2347                context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2348                break;
2349        }
2350}
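/*
 * Editor's note: illustrative sketch, not part of the original file.
 * With maxphyaddr == 36 (common on older parts), the 64-bit masks built
 * above come out as:
 *
 *   rsvd_bits(36, 51)       == 0x000ffff000000000   (PTE/PDE address bits)
 *   rsvd_bits(13, 20)       == 0x00000000001fe000   (2MB large page bits)
 *   exb_bit_rsvd (no NX)    == rsvd_bits(63, 63)    (bit 63)
 *
 * A guest pte with any of these bits set is reported as a reserved-bit
 * fault rather than being shadowed.
 */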
2351
2352static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2353{
2354        struct kvm_mmu *context = &vcpu->arch.mmu;
2355
2356        ASSERT(is_pae(vcpu));
2357        context->new_cr3 = paging_new_cr3;
2358        context->page_fault = paging64_page_fault;
2359        context->gva_to_gpa = paging64_gva_to_gpa;
2360        context->prefetch_page = paging64_prefetch_page;
2361        context->sync_page = paging64_sync_page;
2362        context->invlpg = paging64_invlpg;
2363        context->free = paging_free;
2364        context->root_level = level;
2365        context->shadow_root_level = level;
2366        context->root_hpa = INVALID_PAGE;
2367        return 0;
2368}
2369
2370static int paging64_init_context(struct kvm_vcpu *vcpu)
2371{
2372        reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2373        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2374}
2375
2376static int paging32_init_context(struct kvm_vcpu *vcpu)
2377{
2378        struct kvm_mmu *context = &vcpu->arch.mmu;
2379
2380        reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2381        context->new_cr3 = paging_new_cr3;
2382        context->page_fault = paging32_page_fault;
2383        context->gva_to_gpa = paging32_gva_to_gpa;
2384        context->free = paging_free;
2385        context->prefetch_page = paging32_prefetch_page;
2386        context->sync_page = paging32_sync_page;
2387        context->invlpg = paging32_invlpg;
2388        context->root_level = PT32_ROOT_LEVEL;
2389        context->shadow_root_level = PT32E_ROOT_LEVEL;
2390        context->root_hpa = INVALID_PAGE;
2391        return 0;
2392}
2393
2394static int paging32E_init_context(struct kvm_vcpu *vcpu)
2395{
2396        reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2397        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2398}
2399
2400static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2401{
2402        struct kvm_mmu *context = &vcpu->arch.mmu;
2403
2404        context->new_cr3 = nonpaging_new_cr3;
2405        context->page_fault = tdp_page_fault;
2406        context->free = nonpaging_free;
2407        context->prefetch_page = nonpaging_prefetch_page;
2408        context->sync_page = nonpaging_sync_page;
2409        context->invlpg = nonpaging_invlpg;
2410        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2411        context->root_hpa = INVALID_PAGE;
2412
2413        if (!is_paging(vcpu)) {
2414                context->gva_to_gpa = nonpaging_gva_to_gpa;
2415                context->root_level = 0;
2416        } else if (is_long_mode(vcpu)) {
2417                reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2418                context->gva_to_gpa = paging64_gva_to_gpa;
2419                context->root_level = PT64_ROOT_LEVEL;
2420        } else if (is_pae(vcpu)) {
2421                reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2422                context->gva_to_gpa = paging64_gva_to_gpa;
2423                context->root_level = PT32E_ROOT_LEVEL;
2424        } else {
2425                reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2426                context->gva_to_gpa = paging32_gva_to_gpa;
2427                context->root_level = PT32_ROOT_LEVEL;
2428        }
2429
2430        return 0;
2431}
2432
2433static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2434{
2435        int r;
2436
2437        ASSERT(vcpu);
2438        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2439
2440        if (!is_paging(vcpu))
2441                r = nonpaging_init_context(vcpu);
2442        else if (is_long_mode(vcpu))
2443                r = paging64_init_context(vcpu);
2444        else if (is_pae(vcpu))
2445                r = paging32E_init_context(vcpu);
2446        else
2447                r = paging32_init_context(vcpu);
2448
2449        vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2450
2451        return r;
2452}
2453
2454static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2455{
2456        vcpu->arch.update_pte.pfn = bad_pfn;
2457
2458        if (tdp_enabled)
2459                return init_kvm_tdp_mmu(vcpu);
2460        else
2461                return init_kvm_softmmu(vcpu);
2462}
2463
2464static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2465{
2466        ASSERT(vcpu);
2467        if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2468                vcpu->arch.mmu.free(vcpu);
2469                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2470        }
2471}
2472
2473int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2474{
2475        destroy_kvm_mmu(vcpu);
2476        return init_kvm_mmu(vcpu);
2477}
2478EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2479
2480int kvm_mmu_load(struct kvm_vcpu *vcpu)
2481{
2482        int r;
2483
2484        r = mmu_topup_memory_caches(vcpu);
2485        if (r)
2486                goto out;
2487        spin_lock(&vcpu->kvm->mmu_lock);
2488        kvm_mmu_free_some_pages(vcpu);
2489        r = mmu_alloc_roots(vcpu);
2490        mmu_sync_roots(vcpu);
2491        spin_unlock(&vcpu->kvm->mmu_lock);
2492        if (r)
2493                goto out;
2494        /* set_cr3() should ensure TLB has been flushed */
2495        kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2496out:
2497        return r;
2498}
2499EXPORT_SYMBOL_GPL(kvm_mmu_load);
2500
2501void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2502{
2503        mmu_free_roots(vcpu);
2504}
2505
2506static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2507                                  struct kvm_mmu_page *sp,
2508                                  u64 *spte)
2509{
2510        u64 pte;
2511        struct kvm_mmu_page *child;
2512
2513        pte = *spte;
2514        if (is_shadow_present_pte(pte)) {
2515                if (is_last_spte(pte, sp->role.level))
2516                        rmap_remove(vcpu->kvm, spte);
2517                else {
2518                        child = page_header(pte & PT64_BASE_ADDR_MASK);
2519                        mmu_page_remove_parent_pte(child, spte);
2520                }
2521        }
2522        __set_spte(spte, shadow_trap_nonpresent_pte);
2523        if (is_large_pte(pte))
2524                --vcpu->kvm->stat.lpages;
2525}
2526
2527static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2528                                  struct kvm_mmu_page *sp,
2529                                  u64 *spte,
2530                                  const void *new)
2531{
2532        if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2533                ++vcpu->kvm->stat.mmu_pde_zapped;
2534                return;
2535        }
2536
2537        ++vcpu->kvm->stat.mmu_pte_updated;
2538        if (sp->role.glevels == PT32_ROOT_LEVEL)
2539                paging32_update_pte(vcpu, sp, spte, new);
2540        else
2541                paging64_update_pte(vcpu, sp, spte, new);
2542}
2543
2544static bool need_remote_flush(u64 old, u64 new)
2545{
2546        if (!is_shadow_present_pte(old))
2547                return false;
2548        if (!is_shadow_present_pte(new))
2549                return true;
2550        if ((old ^ new) & PT64_BASE_ADDR_MASK)
2551                return true;
2552        old ^= PT64_NX_MASK;
2553        new ^= PT64_NX_MASK;
2554        return (old & ~new & PT64_PERM_MASK) != 0;
2555}
2556
2557static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2558{
2559        if (need_remote_flush(old, new))
2560                kvm_flush_remote_tlbs(vcpu->kvm);
2561        else
2562                kvm_mmu_flush_tlb(vcpu);
2563}
2564
2565static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2566{
2567        u64 *spte = vcpu->arch.last_pte_updated;
2568
2569        return !!(spte && (*spte & shadow_accessed_mask));
2570}
2571
2572static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2573                                          const u8 *new, int bytes)
2574{
2575        gfn_t gfn;
2576        int r;
2577        u64 gpte = 0;
2578        pfn_t pfn;
2579
2580        if (bytes != 4 && bytes != 8)
2581                return;
2582
2583        /*
2584         * Assume that the pte write is on a page table of the same type
2585         * as the current vcpu paging mode.  This is nearly always true
2586         * (might be false while changing modes).  Note it is verified later
2587         * by update_pte().
2588         */
2589        if (is_pae(vcpu)) {
2590                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2591                if ((bytes == 4) && (gpa % 4 == 0)) {
2592                        r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2593                        if (r)
2594                                return;
2595                        memcpy((void *)&gpte + (gpa % 8), new, 4);
2596                } else if ((bytes == 8) && (gpa % 8 == 0)) {
2597                        memcpy((void *)&gpte, new, 8);
2598                }
2599        } else {
2600                if ((bytes == 4) && (gpa % 4 == 0))
2601                        memcpy((void *)&gpte, new, 4);
2602        }
2603        if (!is_present_gpte(gpte))
2604                return;
2605        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2606
2607        vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2608        smp_rmb();
2609        pfn = gfn_to_pfn(vcpu->kvm, gfn);
2610
2611        if (is_error_pfn(pfn)) {
2612                kvm_release_pfn_clean(pfn);
2613                return;
2614        }
2615        vcpu->arch.update_pte.gfn = gfn;
2616        vcpu->arch.update_pte.pfn = pfn;
2617}
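/*
 * Editor's note: illustrative sketch, not part of the original file.
 * Example of the PAE half-write handling above: a 32-bit PAE guest that
 * updates one 64-bit gpte with two 4-byte stores at gpa 0x1000 and
 * 0x1004 reaches this function twice.  Each call reads the full 8 bytes
 * at gpa & ~7 == 0x1000 and patches the 4 new bytes in at (gpa % 8), so
 * the pfn prefetched into vcpu->arch.update_pte always corresponds to a
 * complete candidate gpte.
 */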
2618
2619static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2620{
2621        u64 *spte = vcpu->arch.last_pte_updated;
2622
2623        if (spte
2624            && vcpu->arch.last_pte_gfn == gfn
2625            && shadow_accessed_mask
2626            && !(*spte & shadow_accessed_mask)
2627            && is_shadow_present_pte(*spte))
2628                set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2629}
2630
2631void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2632                       const u8 *new, int bytes,
2633                       bool guest_initiated)
2634{
2635        gfn_t gfn = gpa >> PAGE_SHIFT;
2636        struct kvm_mmu_page *sp;
2637        struct hlist_node *node, *n;
2638        struct hlist_head *bucket;
2639        unsigned index;
2640        u64 entry, gentry;
2641        u64 *spte;
2642        unsigned offset = offset_in_page(gpa);
2643        unsigned pte_size;
2644        unsigned page_offset;
2645        unsigned misaligned;
2646        unsigned quadrant;
2647        int level;
2648        int flooded = 0;
2649        int npte;
2650        int r;
2651
2652        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2653        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2654        spin_lock(&vcpu->kvm->mmu_lock);
2655        kvm_mmu_access_page(vcpu, gfn);
2656        kvm_mmu_free_some_pages(vcpu);
2657        ++vcpu->kvm->stat.mmu_pte_write;
2658        kvm_mmu_audit(vcpu, "pre pte write");
2659        if (guest_initiated) {
2660                if (gfn == vcpu->arch.last_pt_write_gfn
2661                    && !last_updated_pte_accessed(vcpu)) {
2662                        ++vcpu->arch.last_pt_write_count;
2663                        if (vcpu->arch.last_pt_write_count >= 3)
2664                                flooded = 1;
2665                } else {
2666                        vcpu->arch.last_pt_write_gfn = gfn;
2667                        vcpu->arch.last_pt_write_count = 1;
2668                        vcpu->arch.last_pte_updated = NULL;
2669                }
2670        }
2671        index = kvm_page_table_hashfn(gfn);
2672        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2673        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2674                if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2675                        continue;
2676                pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2677                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2678                misaligned |= bytes < 4;
2679                if (misaligned || flooded) {
2680                        /*
2681                         * Misaligned accesses are too much trouble to fix
2682                         * up; also, they usually indicate a page is not used
2683                         * as a page table.
2684                         *
2685                         * If we're seeing too many writes to a page,
2686                         * it may no longer be a page table, or we may be
2687                         * forking, in which case it is better to unmap the
2688                         * page.
2689                         */
2690                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2691                                 gpa, bytes, sp->role.word);
2692                        if (kvm_mmu_zap_page(vcpu->kvm, sp))
2693                                n = bucket->first;
2694                        ++vcpu->kvm->stat.mmu_flooded;
2695                        continue;
2696                }
2697                page_offset = offset;
2698                level = sp->role.level;
2699                npte = 1;
2700                if (sp->role.glevels == PT32_ROOT_LEVEL) {
2701                        page_offset <<= 1;      /* 32->64 */
2702                        /*
2703                         * A 32-bit pde maps 4MB while the shadow pdes map
2704                         * only 2MB.  So we need to double the offset again
2705                         * and zap two pdes instead of one.
2706                         */
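                        /*
                         * Editor's note: worked example (illustrative, not
                         * part of the original file).  A 4-byte write to
                         * offset 0x7f8 of a 32-bit guest page directory
                         * (pde index 510) becomes page_offset 0xff0 after
                         * the first doubling and 0x1fe0 after the second,
                         * so quadrant = 1, the spte index within that
                         * shadow page is 0xfe0 / 8 = 508, and npte = 2
                         * covers the two 2MB shadow pdes backing the 4MB
                         * guest pde.
                         */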
2707                        if (level == PT32_ROOT_LEVEL) {
2708                                page_offset &= ~7; /* kill rounding error */
2709                                page_offset <<= 1;
2710                                npte = 2;
2711                        }
2712                        quadrant = page_offset >> PAGE_SHIFT;
2713                        page_offset &= ~PAGE_MASK;
2714                        if (quadrant != sp->role.quadrant)
2715                                continue;
2716                }
2717                spte = &sp->spt[page_offset / sizeof(*spte)];
2718                if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2719                        gentry = 0;
2720                        r = kvm_read_guest_atomic(vcpu->kvm,
2721                                                  gpa & ~(u64)(pte_size - 1),
2722                                                  &gentry, pte_size);
2723                        new = (const void *)&gentry;
2724                        if (r < 0)
2725                                new = NULL;
2726                }
2727                while (npte--) {
2728                        entry = *spte;
2729                        mmu_pte_write_zap_pte(vcpu, sp, spte);
2730                        if (new)
2731                                mmu_pte_write_new_pte(vcpu, sp, spte, new);
2732                        mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2733                        ++spte;
2734                }
2735        }
2736        kvm_mmu_audit(vcpu, "post pte write");
2737        spin_unlock(&vcpu->kvm->mmu_lock);
2738        if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2739                kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2740                vcpu->arch.update_pte.pfn = bad_pfn;
2741        }
2742}
2743
2744int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2745{
2746        gpa_t gpa;
2747        int r;
2748
2749        if (tdp_enabled)
2750                return 0;
2751
2752        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2753
2754        spin_lock(&vcpu->kvm->mmu_lock);
2755        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2756        spin_unlock(&vcpu->kvm->mmu_lock);
2757        return r;
2758}
2759EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2760
2761void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2762{
2763        while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
2764               !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2765                struct kvm_mmu_page *sp;
2766
2767                sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2768                                  struct kvm_mmu_page, link);
2769                kvm_mmu_zap_page(vcpu->kvm, sp);
2770                ++vcpu->kvm->stat.mmu_recycled;
2771        }
2772}
2773
2774int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2775{
2776        int r;
2777        enum emulation_result er;
2778
2779        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2780        if (r < 0)
2781                goto out;
2782
2783        if (!r) {
2784                r = 1;
2785                goto out;
2786        }
2787
2788        r = mmu_topup_memory_caches(vcpu);
2789        if (r)
2790                goto out;
2791
2792        er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2793
2794        switch (er) {
2795        case EMULATE_DONE:
2796                return 1;
2797        case EMULATE_DO_MMIO:
2798                ++vcpu->stat.mmio_exits;
2799                return 0;
2800        case EMULATE_FAIL:
2801                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2802                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2803                return 0;
2804        default:
2805                BUG();
2806        }
2807out:
2808        return r;
2809}
2810EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2811
2812void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2813{
2814        vcpu->arch.mmu.invlpg(vcpu, gva);
2815        kvm_mmu_flush_tlb(vcpu);
2816        ++vcpu->stat.invlpg;
2817}
2818EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2819
2820void kvm_enable_tdp(void)
2821{
2822        tdp_enabled = true;
2823}
2824EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2825
2826void kvm_disable_tdp(void)
2827{
2828        tdp_enabled = false;
2829}
2830EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2831
2832static void free_mmu_pages(struct kvm_vcpu *vcpu)
2833{
2834        free_page((unsigned long)vcpu->arch.mmu.pae_root);
2835}
2836
2837static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2838{
2839        struct page *page;
2840        int i;
2841
2842        ASSERT(vcpu);
2843
2844        /*
2845         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2846         * Therefore we need to allocate shadow page tables in the first
2847         * 4GB of memory, which happens to fit the DMA32 zone.
2848         */
2849        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2850        if (!page)
2851                goto error_1;
2852        vcpu->arch.mmu.pae_root = page_address(page);
2853        for (i = 0; i < 4; ++i)
2854                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2855
2856        return 0;
2857
2858error_1:
2859        free_mmu_pages(vcpu);
2860        return -ENOMEM;
2861}
2862
2863int kvm_mmu_create(struct kvm_vcpu *vcpu)
2864{
2865        ASSERT(vcpu);
2866        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2867
2868        return alloc_mmu_pages(vcpu);
2869}
2870
2871int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2872{
2873        ASSERT(vcpu);
2874        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2875
2876        return init_kvm_mmu(vcpu);
2877}
2878
2879void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2880{
2881        ASSERT(vcpu);
2882
2883        destroy_kvm_mmu(vcpu);
2884        free_mmu_pages(vcpu);
2885        mmu_free_memory_caches(vcpu);
2886}
2887
2888void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2889{
2890        struct kvm_mmu_page *sp;
2891
2892        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2893                int i;
2894                u64 *pt;
2895
2896                if (!test_bit(slot, sp->slot_bitmap))
2897                        continue;
2898
2899                pt = sp->spt;
2900                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2901                        /* avoid RMW */
2902                        if (pt[i] & PT_WRITABLE_MASK)
2903                                pt[i] &= ~PT_WRITABLE_MASK;
2904        }
2905        kvm_flush_remote_tlbs(kvm);
2906}
2907
2908void kvm_mmu_zap_all(struct kvm *kvm)
2909{
2910        struct kvm_mmu_page *sp, *node;
2911
2912        spin_lock(&kvm->mmu_lock);
2913        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2914                if (kvm_mmu_zap_page(kvm, sp))
2915                        node = container_of(kvm->arch.active_mmu_pages.next,
2916                                            struct kvm_mmu_page, link);
2917        spin_unlock(&kvm->mmu_lock);
2918
2919        kvm_flush_remote_tlbs(kvm);
2920}
2921
2922static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2923{
2924        struct kvm_mmu_page *page;
2925
2926        page = container_of(kvm->arch.active_mmu_pages.prev,
2927                            struct kvm_mmu_page, link);
2928        kvm_mmu_zap_page(kvm, page);
2929}
2930
2931static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2932{
2933        struct kvm *kvm;
2934        struct kvm *kvm_freed = NULL;
2935        int cache_count = 0;
2936
2937        spin_lock(&kvm_lock);
2938
2939        list_for_each_entry(kvm, &vm_list, vm_list) {
2940                int npages;
2941
2942                if (!down_read_trylock(&kvm->slots_lock))
2943                        continue;
2944                spin_lock(&kvm->mmu_lock);
2945                npages = kvm->arch.n_alloc_mmu_pages -
2946                         kvm->arch.n_free_mmu_pages;
2947                cache_count += npages;
2948                if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2949                        kvm_mmu_remove_one_alloc_mmu_page(kvm);
2950                        cache_count--;
2951                        kvm_freed = kvm;
2952                }
2953                nr_to_scan--;
2954
2955                spin_unlock(&kvm->mmu_lock);
2956                up_read(&kvm->slots_lock);
2957        }
2958        if (kvm_freed)
2959                list_move_tail(&kvm_freed->vm_list, &vm_list);
2960
2961        spin_unlock(&kvm_lock);
2962
2963        return cache_count;
2964}
2965
2966static struct shrinker mmu_shrinker = {
2967        .shrink = mmu_shrink,
2968        .seeks = DEFAULT_SEEKS * 10,
2969};
2970
2971static void mmu_destroy_caches(void)
2972{
2973        if (pte_chain_cache)
2974                kmem_cache_destroy(pte_chain_cache);
2975        if (rmap_desc_cache)
2976                kmem_cache_destroy(rmap_desc_cache);
2977        if (mmu_page_header_cache)
2978                kmem_cache_destroy(mmu_page_header_cache);
2979}
2980
2981void kvm_mmu_module_exit(void)
2982{
2983        mmu_destroy_caches();
2984        unregister_shrinker(&mmu_shrinker);
2985}
2986
2987int kvm_mmu_module_init(void)
2988{
2989        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2990                                            sizeof(struct kvm_pte_chain),
2991                                            0, 0, NULL);
2992        if (!pte_chain_cache)
2993                goto nomem;
2994        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2995                                            sizeof(struct kvm_rmap_desc),
2996                                            0, 0, NULL);
2997        if (!rmap_desc_cache)
2998                goto nomem;
2999
3000        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3001                                                  sizeof(struct kvm_mmu_page),
3002                                                  0, 0, NULL);
3003        if (!mmu_page_header_cache)
3004                goto nomem;
3005
3006        register_shrinker(&mmu_shrinker);
3007
3008        return 0;
3009
3010nomem:
3011        mmu_destroy_caches();
3012        return -ENOMEM;
3013}
3014
3015/*
3016 * Calculate the number of mmu pages needed for kvm.
3017 */
3018unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3019{
3020        int i;
3021        unsigned int nr_mmu_pages;
3022        unsigned int  nr_pages = 0;
3023
3024        for (i = 0; i < kvm->nmemslots; i++)
3025                nr_pages += kvm->memslots[i].npages;
3026
3027        nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3028        nr_mmu_pages = max(nr_mmu_pages,
3029                        (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3030
3031        return nr_mmu_pages;
3032}
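/*
 * Editor's note: illustrative sketch, not part of the original file,
 * assuming KVM_PERMILLE_MMU_PAGES == 20 and KVM_MIN_ALLOC_MMU_PAGES == 64
 * (their values in this era of the tree).  A guest with 1GB of memory
 * slots (262144 pages) gets
 *
 *   nr_mmu_pages = 262144 * 20 / 1000 = 5242
 *
 * while a very small guest is clamped up to the 64-page minimum.
 */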
3033
3034static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3035                                unsigned len)
3036{
3037        if (len > buffer->len)
3038                return NULL;
3039        return buffer->ptr;
3040}
3041
3042static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3043                                unsigned len)
3044{
3045        void *ret;
3046
3047        ret = pv_mmu_peek_buffer(buffer, len);
3048        if (!ret)
3049                return ret;
3050        buffer->ptr += len;
3051        buffer->len -= len;
3052        buffer->processed += len;
3053        return ret;
3054}
3055
3056static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3057                             gpa_t addr, gpa_t value)
3058{
3059        int bytes = 8;
3060        int r;
3061
3062        if (!is_long_mode(vcpu) && !is_pae(vcpu))
3063                bytes = 4;
3064
3065        r = mmu_topup_memory_caches(vcpu);
3066        if (r)
3067                return r;
3068
3069        if (!emulator_write_phys(vcpu, addr, &value, bytes))
3070                return -EFAULT;
3071
3072        return 1;
3073}
3074
3075static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3076{
3077        kvm_set_cr3(vcpu, vcpu->arch.cr3);
3078        return 1;
3079}
3080
3081static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3082{
3083        spin_lock(&vcpu->kvm->mmu_lock);
3084        mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3085        spin_unlock(&vcpu->kvm->mmu_lock);
3086        return 1;
3087}
3088
3089static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3090                             struct kvm_pv_mmu_op_buffer *buffer)
3091{
3092        struct kvm_mmu_op_header *header;
3093
3094        header = pv_mmu_peek_buffer(buffer, sizeof *header);
3095        if (!header)
3096                return 0;
3097        switch (header->op) {
3098        case KVM_MMU_OP_WRITE_PTE: {
3099                struct kvm_mmu_op_write_pte *wpte;
3100
3101                wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3102                if (!wpte)
3103                        return 0;
3104                return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3105                                        wpte->pte_val);
3106        }
3107        case KVM_MMU_OP_FLUSH_TLB: {
3108                struct kvm_mmu_op_flush_tlb *ftlb;
3109
3110                ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3111                if (!ftlb)
3112                        return 0;
3113                return kvm_pv_mmu_flush_tlb(vcpu);
3114        }
3115        case KVM_MMU_OP_RELEASE_PT: {
3116                struct kvm_mmu_op_release_pt *rpt;
3117
3118                rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3119                if (!rpt)
3120                        return 0;
3121                return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3122        }
3123        default: return 0;
3124        }
3125}
3126
3127int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3128                  gpa_t addr, unsigned long *ret)
3129{
3130        int r;
3131        struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3132
3133        buffer->ptr = buffer->buf;
3134        buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3135        buffer->processed = 0;
3136
3137        r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3138        if (r)
3139                goto out;
3140
3141        while (buffer->len) {
3142                r = kvm_pv_mmu_op_one(vcpu, buffer);
3143                if (r < 0)
3144                        goto out;
3145                if (r == 0)
3146                        break;
3147        }
3148
3149        r = 1;
3150out:
3151        *ret = buffer->processed;
3152        return r;
3153}
3154
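    /*
     * Record the shadow page table entries that translate @addr, one per
     * level (stored at sptes[level - 1]), stopping after the first
     * not-present entry.  Returns the number of entries captured.
     */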
3155int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3156{
3157        struct kvm_shadow_walk_iterator iterator;
3158        int nr_sptes = 0;
3159
3160        spin_lock(&vcpu->kvm->mmu_lock);
3161        for_each_shadow_entry(vcpu, addr, iterator) {
3162                sptes[iterator.level-1] = *iterator.sptep;
3163                nr_sptes++;
3164                if (!is_shadow_present_pte(*iterator.sptep))
3165                        break;
3166        }
3167        spin_unlock(&vcpu->kvm->mmu_lock);
3168
3169        return nr_sptes;
3170}
3171EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3172
3173#ifdef AUDIT
3174
3175static const char *audit_msg;
3176
3177static gva_t canonicalize(gva_t gva)
3178{
3179#ifdef CONFIG_X86_64
3180        gva = (long long)(gva << 16) >> 16;
3181#endif
3182        return gva;
3183}
3184
3185
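    /*
     * Walk every present leaf spte reachable from the current shadow
     * root (a single 4-level root or the four PAE roots) and invoke @fn
     * on it.
     */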
3186typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3187                                 u64 *sptep);
3188
3189static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3190                            inspect_spte_fn fn)
3191{
3192        int i;
3193
3194        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3195                u64 ent = sp->spt[i];
3196
3197                if (is_shadow_present_pte(ent)) {
3198                        if (!is_last_spte(ent, sp->role.level)) {
3199                                struct kvm_mmu_page *child;
3200                                child = page_header(ent & PT64_BASE_ADDR_MASK);
3201                                __mmu_spte_walk(kvm, child, fn);
3202                        } else
3203                                fn(kvm, sp, &sp->spt[i]);
3204                }
3205        }
3206}
3207
3208static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3209{
3210        int i;
3211        struct kvm_mmu_page *sp;
3212
3213        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3214                return;
3215        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3216                hpa_t root = vcpu->arch.mmu.root_hpa;
3217                sp = page_header(root);
3218                __mmu_spte_walk(vcpu->kvm, sp, fn);
3219                return;
3220        }
3221        for (i = 0; i < 4; ++i) {
3222                hpa_t root = vcpu->arch.mmu.pae_root[i];
3223
3224                if (root && VALID_PAGE(root)) {
3225                        root &= PT64_BASE_ADDR_MASK;
3226                        sp = page_header(root);
3227                        __mmu_spte_walk(vcpu->kvm, sp, fn);
3228                }
3229        }
3230        return;
3231}
3232
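    /*
     * For each present leaf spte in the page referenced by @page_pte,
     * compare the host physical address it points to with the one
     * obtained by translating the corresponding guest virtual address
     * through the guest page tables, and report mismatches as well as
     * notrap entries that shadow a valid guest mapping.
     */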
3233static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3234                                gva_t va, int level)
3235{
3236        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3237        int i;
3238        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3239
3240        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3241                u64 ent = pt[i];
3242
3243                if (ent == shadow_trap_nonpresent_pte)
3244                        continue;
3245
3246                va = canonicalize(va);
3247                if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3248                        audit_mappings_page(vcpu, ent, va, level - 1);
3249                else {
3250                        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3251                        gfn_t gfn = gpa >> PAGE_SHIFT;
3252                        pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3253                        hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3254
3255                        if (is_error_pfn(pfn)) {
3256                                kvm_release_pfn_clean(pfn);
3257                                continue;
3258                        }
3259
3260                        if (is_shadow_present_pte(ent)
3261                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
3262                                printk(KERN_ERR "xx audit error: (%s) levels %d"
3263                                       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3264                                       audit_msg, vcpu->arch.mmu.root_level,
3265                                       va, gpa, hpa, ent,
3266                                       is_shadow_present_pte(ent));
3267                        else if (ent == shadow_notrap_nonpresent_pte
3268                                 && !is_error_hpa(hpa))
3269                                printk(KERN_ERR "audit: (%s) notrap shadow,"
3270                                       " valid guest gva %lx\n", audit_msg, va);
3271                        kvm_release_pfn_clean(pfn);
3272
3273                }
3274        }
3275}
3276
3277static void audit_mappings(struct kvm_vcpu *vcpu)
3278{
3279        unsigned i;
3280
3281        if (vcpu->arch.mmu.root_level == 4)
3282                audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3283        else
3284                for (i = 0; i < 4; ++i)
3285                        if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3286                                audit_mappings_page(vcpu,
3287                                                    vcpu->arch.mmu.pae_root[i],
3288                                                    i << 30,
3289                                                    2);
3290}
3291
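    /*
     * Count the sptes recorded in the rmaps of all memory slots,
     * following both the direct single-spte encoding and chained
     * kvm_rmap_desc lists.
     */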
3292static int count_rmaps(struct kvm_vcpu *vcpu)
3293{
3294        int nmaps = 0;
3295        int i, j, k;
3296
3297        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3298                struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3299                struct kvm_rmap_desc *d;
3300
3301                for (j = 0; j < m->npages; ++j) {
3302                        unsigned long *rmapp = &m->rmap[j];
3303
3304                        if (!*rmapp)
3305                                continue;
3306                        if (!(*rmapp & 1)) {
3307                                ++nmaps;
3308                                continue;
3309                        }
3310                        d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3311                        while (d) {
3312                                for (k = 0; k < RMAP_EXT; ++k)
3313                                        if (d->sptes[k])
3314                                                ++nmaps;
3315                                        else
3316                                                break;
3317                                d = d->more;
3318                        }
3319                }
3320        }
3321        return nmaps;
3322}
3323
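    /*
     * For a writable spte, verify that the gfn it maps still has a
     * memslot and that a reverse mapping exists for it; complain
     * (rate-limited) otherwise.
     */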
3324void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
3325{
3326        unsigned long *rmapp;
3327        struct kvm_mmu_page *rev_sp;
3328        gfn_t gfn;
3329
3330        if (*sptep & PT_WRITABLE_MASK) {
3331                rev_sp = page_header(__pa(sptep));
3332                gfn = rev_sp->gfns[sptep - rev_sp->spt];
3333
3334                if (!gfn_to_memslot(kvm, gfn)) {
3335                        if (!printk_ratelimit())
3336                                return;
3337                        printk(KERN_ERR "%s: no memslot for gfn %ld\n",
3338                                         audit_msg, gfn);
3339                        printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3340                                        audit_msg, sptep - rev_sp->spt,
3341                                        rev_sp->gfn);
3342                        dump_stack();
3343                        return;
3344                }
3345
3346                rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
3347                                    is_large_pte(*sptep));
3348                if (!*rmapp) {
3349                        if (!printk_ratelimit())
3350                                return;
3351                        printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3352                                         audit_msg, *sptep);
3353                        dump_stack();
3354                }
3355        }
3356
3357}
3358
3359void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3360{
3361        mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3362}
3363
3364static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3365{
3366        struct kvm_mmu_page *sp;
3367        int i;
3368
3369        list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3370                u64 *pt = sp->spt;
3371
3372                if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3373                        continue;
3374
3375                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3376                        u64 ent = pt[i];
3377
3378                        if (!(ent & PT_PRESENT_MASK))
3379                                continue;
3380                        if (!(ent & PT_WRITABLE_MASK))
3381                                continue;
3382                        inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3383                }
3384        }
3385        return;
3386}
3387
3388static void audit_rmap(struct kvm_vcpu *vcpu)
3389{
3390        check_writable_mappings_rmap(vcpu);
3391        count_rmaps(vcpu);
3392}
3393
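    /*
     * Every shadowed (non-direct, in-sync) guest page table is expected
     * to be write-protected; report any remaining writable mapping of
     * its gfn found through the rmap.
     */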
3394static void audit_write_protection(struct kvm_vcpu *vcpu)
3395{
3396        struct kvm_mmu_page *sp;
3397        struct kvm_memory_slot *slot;
3398        unsigned long *rmapp;
3399        u64 *spte;
3400        gfn_t gfn;
3401
3402        list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3403                if (sp->role.direct)
3404                        continue;
3405                if (sp->unsync)
3406                        continue;
3407
3408                gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3409                slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3410                rmapp = &slot->rmap[gfn - slot->base_gfn];
3411
3412                spte = rmap_next(vcpu->kvm, rmapp, NULL);
3413                while (spte) {
3414                        if (*spte & PT_WRITABLE_MASK)
3415                                printk(KERN_ERR "%s: (%s) shadow page has "
3416                                       "writable mappings: gfn %lx role %x\n",
3417                                       __func__, audit_msg, sp->gfn,
3418                                       sp->role.word);
3419                        spte = rmap_next(vcpu->kvm, rmapp, spte);
3420                }
3421        }
3422}
3423
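    /*
     * Run the full audit pass with debug printing suppressed.  The
     * mapping consistency check is skipped for the "pre pte write" call
     * site, where the guest pte has already been written to guest memory
     * but the shadow entries have not yet been updated.
     */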
3424static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3425{
3426        int olddbg = dbg;
3427
3428        dbg = 0;
3429        audit_msg = msg;
3430        audit_rmap(vcpu);
3431        audit_write_protection(vcpu);
3432        if (strcmp("pre pte write", audit_msg) != 0)
3433                audit_mappings(vcpu);
3434        audit_writable_sptes_have_rmaps(vcpu);
3435        dbg = olddbg;
3436}
3437
3438#endif
3439