linux/arch/powerpc/kvm/e500_mmu_host.c
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}
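/*
 * Illustrative note (not in the original source): host TLB1 is carved up
 * as follows.  Entries [0, tlbcam_index) hold the host kernel's CAM
 * (linear mapping) entries and must never be clobbered; the entry at
 * tlbcam_index is reserved for the magic page (see kvmppc_map_magic()
 * below); the remainder is available for shadow mappings.  Shadow slot
 * sesel is allocated top-down, so to_htlb1_esel(0) is the highest
 * hardware entry.
 */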

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}
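/*
 * Worked example (illustrative, not in the original source): each MAS3
 * user permission bit sits one position above its supervisor counterpart
 * (MAS3_UR == MAS3_SR << 1, and so on), which is why the "<< 1" above
 * maps the guest's supervisor permissions onto the user bits.  For a
 * guest entry with SR|SW (0x05) and !usermode: the user bits are
 * cleared, 0x05 << 1 yields UR|UW (0x0a), and the final
 * |= E500_TLB_SUPER_PERM_MASK grants full supervisor access, since on
 * non-HV the guest runs in host user mode and the supervisor bits govern
 * the host kernel's own accesses.
 */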

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0,
                                     uint32_t lpid)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;
        u32 mas4;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        mas4 = mfspr(SPRN_MAS4);
        mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        mtspr(SPRN_MAS4, mas4);
        local_irq_restore(flags);

        return mas0;
}
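/*
 * Illustrative note (not in the original source): when tlbsx misses, the
 * hardware loads MAS0 with the same TLBSEL/ESEL/NV victim hint it would
 * use on a real TLB miss at that address, which is exactly what we want
 * for picking a TLB0 slot.  Masking off CONFIG_PAGE_OFFSET turns eaddr
 * into a user-range address, so the PID-0 search should always miss.
 */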

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)),
                                  vcpu_e500->vcpu.kvm->arch.lpid);
        }
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

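        /*
         * Illustrative comment (not in the original source): preemption
         * is disabled so the shadow TID computed below stays valid until
         * the TLB write lands on this CPU; shadow PIDs are managed
         * per-CPU on e500v2, so migrating between the lookup and the
         * write could leave us with a stale stid.
         */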
        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

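/*
 * Illustrative note (not in the original source): the "magic page" is
 * KVM's paravirtual shared page, which e500v2 guests map at
 * magic_page_ea.  It is pinned into the host TLB1 entry at ESEL
 * tlbcam_index, i.e. the slot that tlb1_max_shadow_size() reserves.
 */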
#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        kvm_pfn_t pfn;

        pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
        preempt_enable();
}
#endif

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID)) {
                WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
                     "%s: flags %x\n", __func__, ref->flags);
                WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
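                /*
                 * Worked example (illustrative, not in the original
                 * source): tmp & -tmp isolates the lowest set bit of the
                 * bitmap and __ilog2_u64() converts it to a host slot
                 * index, while tmp &= tmp - 1 clears that bit.  For
                 * tmp = 0x14 the loop visits host TLB1 slots 2 and 4,
                 * then terminates.
                 */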
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /*
         * If the TLB entry is still valid then it's a TLB0 entry, and thus
         * backed by at most one host tlbe per shadow pid
         */
        if (ref->flags & E500_TLB_VALID)
                kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB entry as no longer backed by the host */
        ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         kvm_pfn_t pfn, unsigned int wimg)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        /* Use guest supplied MAS2_G and MAS2_E */
        ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

        /* Mark the page accessed */
        kvm_set_pfn_accessed(pfn);

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel;
        int i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
        }
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_e500_tlbil_all(vcpu_e500);
        clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        kvm_pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
        int ret = 0;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu_e500->vcpu.kvm;
        unsigned long tsize_pages = 0;
        pte_t *ptep;
        unsigned int wimg = 0;
        pgd_t *pgdir;
        unsigned long flags;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */

                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */

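                        /*
                         * Worked example (illustrative, not in the
                         * original source): tsize_pages is the candidate
                         * size in 4K pages, so tsize = 6 (64K) gives
                         * tsize_pages = 1 << 4 = 16.  gfn and pfn are
                         * mutually aligned iff they agree modulo 16; if
                         * any check fails, tsize -= 2 drops to the next
                         * size e500 supports, one factor of 4 smaller
                         * (16K).
                         */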
                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end;
                                tsize_pages = 1UL << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both
                         * the host and the guest mappings.
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        if (printk_ratelimit())
                                pr_err("%s: real page not found for gfn %lx\n",
                                       __func__, (long)gfn);
                        return -EINVAL;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

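        /*
         * Illustrative note (not in the original source): this is KVM's
         * standard MMU-notifier protocol.  mmu_seq was snapshotted above
         * (with smp_rmb()) before the gfn->pfn translation; if an
         * invalidation has run, or is in flight, since that snapshot,
         * mmu_notifier_retry() fails under kvm->mmu_lock and we return
         * -EAGAIN rather than insert a possibly stale pfn.
         */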
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }

        pgdir = vcpu_e500->vcpu.arch.pgdir;
        /*
         * We are just looking at the wimg bits, so we don't
         * care much about the transparent hugepage splitting bit.
         * We are holding kvm->mmu_lock so a notifier invalidate
         * can't run, hence pfn won't change.
         */
        local_irq_save(flags);
        ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL, NULL);
        if (ptep) {
                pte_t pte = READ_ONCE(*ptep);

                if (pte_present(pte)) {
                        wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
                                MAS2_WIMGE_MASK;
                        local_irq_restore(flags);
                } else {
                        local_irq_restore(flags);
                        pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
                                           __func__, (long)gfn, pfn);
                        ret = -EINVAL;
                        goto out;
                }
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

out:
        spin_unlock(&kvm->mmu_lock);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);

        return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

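/*
 * Illustrative note (not in the original source): a single guest TLB1
 * entry can be backed by up to 64 host TLB1 entries, tracked by a pair
 * of maps.  g2h_tlb1_map[esel] is a 64-bit bitmap of the host slots
 * backing guest entry esel; h2g_tlb1_rmap[sesel] is the reverse map,
 * storing esel + 1 (0 means the host slot is free).  Host slots are
 * handed out round-robin via host_tlb1_nv, evicting whatever mapping
 * previously held the slot.
 */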
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }

        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
        WARN_ON(!(ref->flags & E500_TLB_VALID));

        return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.  Handles both the one-to-one and one-to-many cases.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;

        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   ref);
        if (r)
                return r;

        /*
         * If the mapping degenerated to a single 4K page, back it with a
         * host TLB0 entry instead; the sesel argument is ignored for TLB0.
         */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

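/*
 * Illustrative note (not in the original source): this is the shadow
 * fault-in path, reached when the guest already has a valid TLB entry
 * for eaddr but no host shadow mapping exists yet.  'index' packs the
 * guest TLB selector and entry number, unpacked by tlbsel_of()/esel_of().
 */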
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
                          u32 *instr)
{
        gva_t geaddr;
        hpa_t addr;
        hfn_t pfn;
        hva_t eaddr;
        u32 mas1, mas2, mas3;
        u64 mas7_mas3;
        struct page *page;
        unsigned int addr_space, psize_shift;
        bool pr;
        unsigned long flags;

        /* Search TLB for guest pc to get the real address */
        geaddr = kvmppc_get_pc(vcpu);

        addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
        asm volatile("tlbsx 0, %[geaddr]\n" : :
                     [geaddr] "r" (geaddr));
        mtspr(SPRN_MAS5, 0);
        mtspr(SPRN_MAS8, 0);
        mas1 = mfspr(SPRN_MAS1);
        mas2 = mfspr(SPRN_MAS2);
        mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
        mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
        mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
        local_irq_restore(flags);

        /*
         * If the TLB entry for guest pc was evicted, return to the guest.
         * There is a good chance a valid TLB entry will be found on the
         * next lookup.
         */
        if (!(mas1 & MAS1_VALID))
                return EMULATE_AGAIN;

        /*
         * Another thread may rewrite the TLB entry in parallel; don't
         * execute from the address if the execute permission is not set.
         */
        pr = vcpu->arch.shared->msr & MSR_PR;
        if (unlikely((pr && !(mas3 & MAS3_UX)) ||
                     (!pr && !(mas3 & MAS3_SX)))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx without execute permission\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /*
         * The real address will be mapped by a cacheable, memory coherent,
         * write-back page. Check for mismatches when LRAT is used.
         */
        if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
            unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /* Get pfn */
        psize_shift = MAS1_GET_TSIZE(mas1) + 10;
        addr = (mas7_mas3 & (~0ULL << psize_shift)) |
               (geaddr & ((1ULL << psize_shift) - 1ULL));
        pfn = addr >> PAGE_SHIFT;
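
        /*
         * Worked example (illustrative, not in the original source): for
         * TSIZE = 2 (a 4K page), psize_shift = 12, so the real address
         * combines bits [63:12] of mas7_mas3 (the RPN) with bits [11:0]
         * of the guest PC (the page offset).
         */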

        /* Guard against emulation from the device area */
        if (unlikely(!page_is_ram(pfn))) {
                pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
                         __func__, addr);
                return EMULATE_AGAIN;
        }

        /* Map the page and read the guest's instruction */
        page = pfn_to_page(pfn);
        eaddr = (unsigned long)kmap_atomic(page);
        *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
        kunmap_atomic((u32 *)eaddr);

        return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
                          u32 *instr)
{
        return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * it guarantees that we catch the page being unmapped.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
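        /*
         * Illustrative comment (not in the original source): TLB1 on
         * e500 is fully associative, hence it is modeled as a single set
         * whose way count equals its entry count.
         */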
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                return -EINVAL;

        return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
}