linux/arch/powerpc/kvm/e500_mmu_host.c
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched/mm.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/pte-walk.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

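/*
 * Host TLB1 entries used for guest mappings are allocated from the top
 * of the array downward, so shadow index 0 corresponds to the highest
 * hardware ESEL; to_htlb1_esel() converts a shadow index into the
 * hardware ESEL.
 */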
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

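/*
 * The bottom tlbcam_index entries of host TLB1 are pinned by the host
 * kernel (its CAM mappings of lowmem), so only the entries above them
 * are available for shadow mappings.
 */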
static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

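/*
 * Compute the shadow (host) MAS3 permission bits for a guest mapping.
 * Without CONFIG_KVM_BOOKE_HV the guest runs in host user mode, so when
 * the guest is in its supervisor mode, the guest's supervisor permissions
 * must be mirrored into the user permission bits of the shadow entry.
 * The supervisor bits are always set so that the host kernel can access
 * the mapping as well.
 */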
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /*
                 * Guest is in supervisor mode, so we need to translate
                 * guest supervisor permissions into user permissions.
                 */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0,
                                     uint32_t lpid)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;
        u32 mas4;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        mas4 = mfspr(SPRN_MAS4);
        mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        mtspr(SPRN_MAS4, mas4);
        local_irq_restore(flags);

        return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)),
                                  vcpu_e500->vcpu.kvm->arch.lpid);
        }
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

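/*
 * Pin the vcpu's shared (magic) page -- the kvm_vcpu_arch_shared area a
 * paravirtualized guest maps via hypercall -- with a dedicated TLB1
 * entry in the slot reserved by tlb1_max_shadow_size() (ESEL
 * tlbcam_index).
 */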
#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        kvm_pfn_t pfn;

        pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
        preempt_enable();
}
#endif

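/*
 * Invalidate any host TLB entries backing the given guest TLB entry.
 * A guest TLB1 entry may be backed by several host TLB1 entries (tracked
 * in the g2h bitmap) or by host TLB0 entries (E500_TLB_TLB0); a guest
 * TLB0 entry is backed by at most one host entry per shadow PID.
 */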
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID)) {
                WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
                     "%s: flags %x\n", __func__, ref->flags);
                WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
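                /*
                 * Each set bit in the g2h bitmap is the shadow index of
                 * one host TLB1 entry backing this guest entry:
                 * tmp & -tmp isolates the lowest set bit, and
                 * tmp &= tmp - 1 clears it.
                 */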
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /*
         * If the TLB entry is still valid, then it's a TLB0 entry and is
         * thus backed by at most one host tlbe per shadow pid.
         */
        if (ref->flags & E500_TLB_VALID)
                kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB as not backed by the host anymore */
        ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         kvm_pfn_t pfn, unsigned int wimg)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        /* Use guest supplied MAS2_G and MAS2_E */
        ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

        /* Mark the page accessed */
        kvm_set_pfn_accessed(pfn);

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel;
        int i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
        }
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_e500_tlbil_all(vcpu_e500);
        clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        kvm_pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}

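/*
 * Resolve a guest mapping to a host pfn and fill in the shadow TLB
 * entry: translate gfn to pfn, pick the largest page size the host
 * mapping allows (TLB1 only), take the WIMG attributes from the host
 * Linux PTE, and set up *stlbe.  Returns 0 on success or a negative
 * errno.
 */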
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
        int ret = 0;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu_e500->vcpu.kvm;
        unsigned long tsize_pages = 0;
        pte_t *ptep;
        unsigned int wimg = 0;
        pgd_t *pgdir;
        unsigned long flags;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */

                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start + vma_pages(vma);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */

                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end;
                                tsize_pages = 1UL << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both
                         * host and guest mapping.  MAS1 TSIZE encodes a
                         * page size of 1KB << tsize, so __ilog2(psize) - 10
                         * converts the host page size in bytes to a tsize.
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        if (printk_ratelimit())
                                pr_err("%s: real page not found for gfn %lx\n",
                                       __func__, (long)gfn);
                        return -EINVAL;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }

        pgdir = vcpu_e500->vcpu.arch.pgdir;
        /*
         * We are just looking at the wimg bits, so we don't
         * care much about the trans splitting bit.
         * We are holding kvm->mmu_lock, so a notifier invalidate
         * can't run, hence pfn won't change.
         */
        local_irq_save(flags);
        ptep = find_linux_pte(pgdir, hva, NULL, NULL);
        if (ptep) {
                pte_t pte = READ_ONCE(*ptep);

                if (pte_present(pte)) {
                        wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
                                MAS2_WIMGE_MASK;
                        local_irq_restore(flags);
                } else {
                        local_irq_restore(flags);
                        pr_err_ratelimited("%s: pte not present: gfn %lx, pfn %lx\n",
                                           __func__, (long)gfn, pfn);
                        ret = -EINVAL;
                        goto out;
                }
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

out:
        spin_unlock(&kvm->mmu_lock);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);

        return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

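/*
 * Allocate a host TLB1 slot for a guest TLB1 entry, round-robin via
 * host_tlb1_nv.  g2h_tlb1_map is a per-guest-entry bitmap of the host
 * slots backing it; h2g_tlb1_rmap records the owning guest esel biased
 * by one, so that zero means "unused".
 */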
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }

        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
        WARN_ON(!(ref->flags & E500_TLB_VALID));

        return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.  Handles both the one-to-one and one-to-many cases.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;

        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   ref);
        if (r)
                return r;

        /* Use TLB0 when we can only map with 4K pages */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

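/*
 * Map the guest TLB entry at the combined index (tlbsel/esel as packed
 * by the caller) into the host shadow TLB, reusing a still-valid TLB0
 * ref where possible.
 */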
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

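/*
 * On HV hosts (e.g. e500mc), fetch the instruction to be emulated:
 * translate the guest PC with a hardware tlbsx under the guest's
 * LPID/PID, check execute permission and storage attributes, then map
 * the page and read the instruction word.
 */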
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_fetch_type type, u32 *instr)
{
        gva_t geaddr;
        hpa_t addr;
        hfn_t pfn;
        hva_t eaddr;
        u32 mas1, mas2, mas3;
        u64 mas7_mas3;
        struct page *page;
        unsigned int addr_space, psize_shift;
        bool pr;
        unsigned long flags;

        /* Search TLB for guest pc to get the real address */
        geaddr = kvmppc_get_pc(vcpu);

        addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
        mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
        asm volatile("tlbsx 0, %[geaddr]\n" : :
                     [geaddr] "r" (geaddr));
        mtspr(SPRN_MAS5, 0);
        mtspr(SPRN_MAS8, 0);
        mas1 = mfspr(SPRN_MAS1);
        mas2 = mfspr(SPRN_MAS2);
        mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
        mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
        mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
        local_irq_restore(flags);

        /*
         * If the TLB entry for the guest pc was evicted, return to the
         * guest; a valid entry will likely be present on the next attempt.
         */
        if (!(mas1 & MAS1_VALID))
                return EMULATE_AGAIN;

        /*
         * Another thread may rewrite the TLB entry in parallel; don't
         * execute from the address if the execute permission is not set.
         */
        pr = vcpu->arch.shared->msr & MSR_PR;
        if (unlikely((pr && !(mas3 & MAS3_UX)) ||
                     (!pr && !(mas3 & MAS3_SX)))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx without execute permission\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /*
         * The real address will be mapped by a cacheable, memory coherent,
         * write-back page. Check for mismatches when LRAT is used.
         */
        if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
            unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
                pr_err_ratelimited(
                        "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
                        __func__, geaddr);
                return EMULATE_AGAIN;
        }

        /* Get pfn */
        psize_shift = MAS1_GET_TSIZE(mas1) + 10;
        addr = (mas7_mas3 & (~0ULL << psize_shift)) |
               (geaddr & ((1ULL << psize_shift) - 1ULL));
        pfn = addr >> PAGE_SHIFT;

        /* Guard against emulation from devices area */
        if (unlikely(!page_is_ram(pfn))) {
                pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
                         __func__, addr);
                return EMULATE_AGAIN;
        }

        /* Map a page and get guest's instruction */
        page = pfn_to_page(pfn);
        eaddr = (unsigned long)kmap_atomic(page);
        *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
        kunmap_atomic((u32 *)eaddr);

        return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_fetch_type type, u32 *instr)
{
        return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * we are 100% sure that we catch the page to be unmapped.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
        return 0;
}

/*****************************************/

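/*
 * Read the host TLB geometry from TLB0CFG/TLB1CFG and allocate the
 * host-to-guest rmap for TLB1.  TLB1 on e500 is fully associative, so
 * its number of ways equals its number of entries.
 */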
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
                                           sizeof(*vcpu_e500->h2g_tlb1_rmap),
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                return -ENOMEM;

        return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
}