linux/arch/powerpc/kvm/e500_mmu_host.c
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

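/*
 * Host TLB1 usage: entries 0..tlbcam_index-1 hold the host's own CAM
 * mappings, entry tlbcam_index is reserved for the guest's magic page
 * (e500v2, see kvmppc_map_magic()), and shadow entries for the guest
 * grow down from the top.  to_htlb1_esel() turns a 0-based shadow
 * index into that top-down hardware slot.
 */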
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

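/*
 * Example (without CONFIG_KVM_BOOKE_HV): the guest kernel runs in host
 * user mode, so a supervisor-only guest mapping such as MAS3_SR|MAS3_SW
 * must become MAS3_UR|MAS3_UW in the shadow entry -- each supervisor
 * permission bit shifts up one position into the matching user bit.
 * Supervisor permissions are then set unconditionally, so the host
 * kernel can also access the page through this mapping.
 */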
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

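/*
 * On SMP hosts the vcpu may run on any CPU, so shadow mappings are
 * always created with the M (memory coherence required) bit set.
 */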
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
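/*
 * Map the vcpu's shared (magic) page -- the paravirtual interface page
 * the guest accesses at magic_page_ea -- with a host TLB1 entry placed
 * just above the host's own tlbcam mappings.
 */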
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		/*
		 * Each set bit in the map is one host TLB1 slot backing
		 * this guest entry; tmp & -tmp isolates the lowest set
		 * bit and tmp &= tmp - 1 clears it.
		 */
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/* Already invalidated in between */
	if (!(ref->flags & E500_TLB_VALID))
		return;

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags |= E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
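	/*
	 * MAS1 TSIZE encodes the page size as 1KB << tsize, so
	 * BOOK3E_PAGESZ_4K == 2 and stepping tsize by 2 scales the page
	 * size by 4.  Hence the "tsize & ~1" clamps below (e500 supports
	 * only power-of-4 sizes) and conversions such as
	 * 1 << (tsize + 10 - PAGE_SHIFT) for the size in host pages.
	 */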

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mappings.
			 */
			tsize = min(__ilog2(psize) - 10, tsize); /* bytes -> 1K << tsize */

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return 0;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

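/*
 * Pick a host TLB1 slot for a shadow entry, round-robin via
 * host_tlb1_nv.  g2h_tlb1_map[esel] is a bitmap of host slots backing
 * guest entry esel (a large guest mapping may be split across several
 * host entries); h2g_tlb1_rmap[sesel] records the owning guest entry
 * plus one, with 0 meaning the slot is free.  An evicted slot is
 * cleared from its previous owner's bitmap first.
 */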
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.  Handles both one-to-one and one-to-many mappings.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

/************* MMU Notifiers *************/

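/*
 * Note: these callbacks take the simplest correct approach -- any
 * host-side change flushes every shadow TLB entry, and pages are
 * faulted back in on the next guest access.  Accessed/young state is
 * not tracked, so the age callbacks always report "not referenced".
 */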
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	/* TLB1 is fully associative: one set, ways == entries. */
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -ENOMEM;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}