linux/arch/powerpc/kvm/book3s_64_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
        unsigned long msr = vcpu->arch.intr_msr;
        unsigned long cur_msr = kvmppc_get_msr(vcpu);

        /* If transactional, change to suspend mode on IRQ delivery */
        if (MSR_TM_TRANSACTIONAL(cur_msr))
                msr |= MSR_TS_S;
        else
                msr |= cur_msr & MSR_TS_MASK;

        kvmppc_set_msr(vcpu, msr);
}
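
/*
 * Note (illustrative, per the ISA v2.07 transactional-memory rules):
 * an interrupt taken while MSR[TS] = 0b10 (transactional) is delivered
 * with MSR[TS] = 0b01 (suspended), which is what the branch above
 * encodes; an already-suspended or non-transactional TS value is
 * simply carried over from the old MSR.
 */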

static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
                                struct kvm_vcpu *vcpu,
                                gva_t eaddr)
{
        int i;
        u64 esid = GET_ESID(eaddr);
        u64 esid_1t = GET_ESID_1T(eaddr);

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 cmp_esid = esid;

                if (!vcpu->arch.slb[i].valid)
                        continue;

                if (vcpu->arch.slb[i].tb)
                        cmp_esid = esid_1t;

                if (vcpu->arch.slb[i].esid == cmp_esid)
                        return &vcpu->arch.slb[i];
        }

        dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
                eaddr, esid, esid_1t);
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (vcpu->arch.slb[i].vsid)
                        dprintk("  %d: %c%c%c %llx %llx\n", i,
                                vcpu->arch.slb[i].valid ? 'v' : ' ',
                                vcpu->arch.slb[i].large ? 'l' : ' ',
                                vcpu->arch.slb[i].tb    ? 't' : ' ',
                                vcpu->arch.slb[i].esid,
                                vcpu->arch.slb[i].vsid);
        }

        return NULL;
}
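
/*
 * Example (illustrative values, assuming GET_ESID() reduces to
 * ea >> SID_SHIFT): eaddr 0xC000000012345678 yields
 * esid = 0xC00000001 (256MB segments, SID_SHIFT = 28) and
 * esid_1t = 0xC00000 (1TB segments, SID_SHIFT_1T = 40); which of the
 * two an entry is matched against depends on its tb (1TB segment) bit.
 */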

static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
{
        return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
}

static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
{
        return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
}

static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
{
        eaddr &= kvmppc_slb_offset_mask(slb);

        return (eaddr >> VPN_SHIFT) |
                ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
}
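
/*
 * Worked example (illustrative values): for a 256MB segment
 * (shift 28, VPN_SHIFT = 12), vsid 0x100 and eaddr 0x12345678:
 *   offset = eaddr & 0x0FFFFFFF             = 0x02345678
 *   vpn    = (offset >> 12) | (0x100 << 16) = 0x2345 | 0x1000000
 *                                           = 0x1002345
 * i.e. the VPN is the VSID concatenated with the segment-relative
 * page index.
 */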

static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
                                         bool data)
{
        struct kvmppc_slb *slb;

        slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slb)
                return 0;

        return kvmppc_slb_calc_vpn(slb, eaddr);
}

static int mmu_pagesize(int mmu_pg)
{
        switch (mmu_pg) {
        case MMU_PAGE_64K:
                return 16;
        case MMU_PAGE_16M:
                return 24;
        }
        return 12;
}
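
/*
 * The value returned is log2 of the page size (12 -> 4k, 16 -> 64k,
 * 24 -> 16M), so (1ul << mmu_pagesize(pg)) recovers the size in bytes.
 */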

static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
        return mmu_pagesize(slbe->base_page_size);
}

static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);

        return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}

static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu,
                                struct kvmppc_slb *slbe, gva_t eaddr,
                                bool second)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u64 hash, pteg, htabsize;
        u32 ssize;
        hva_t r;
        u64 vpn;

        htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

        vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
        ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
        hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
        if (second)
                hash = ~hash;
        hash &= ((1ULL << 39ULL) - 1ULL);
        hash &= htabsize;
        hash <<= 7ULL;

        pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
        pteg |= hash;

        dprintk("MMU: eaddr=0x%lx sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
                eaddr, vcpu_book3s->sdr1, pteg, slbe->vsid);

        /*
         * When running a PAPR guest, SDR1 contains an HVA instead
         * of a GPA.
         */
        if (vcpu->arch.papr_enabled)
                r = pteg;
        else
                r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);

        if (kvm_is_error_hva(r))
                return r;
        return r | (pteg & ~PAGE_MASK);
}
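
/*
 * Worked example (illustrative values): sdr1 = 0x0000000001000002
 * puts the HTAB base at 0x1000000 with HTABSIZE = 2, so the hash is
 * masked to (1 << 13) - 1 = 0x1fff.  A truncated hash of 0x12345
 * becomes 0x345; shifted left by 7 (a PTEG is 8 HPTEs * 16 bytes =
 * 128 bytes) that is 0x1a280, giving pteg = 0x101a280.
 */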

static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
        int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
        u64 avpn;

        avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
        avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

        if (p < 16)
                avpn >>= ((80 - p) - 56) - 8;   /* 16 - p */
        else
                avpn <<= p - 16;

        return avpn;
}
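
/*
 * The computed value is the virtual address shifted down to a 64k
 * (2^16) boundary; HPTE_V_AVPN, applied by the caller, then masks
 * off the low-order bits implied by the hash.  For p < 16 the low
 * (16 - p) bits of the page index are dropped: with 4k pages
 * (p = 12), ((80 - p) - 56) - 8 = 4, matching the "16 - p" comment.
 * For p > 16 the value is shifted left to line up instead.
 */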

/*
 * Return page size encoded in the second word of a HPTE, or
 * -1 for an invalid encoding for the base page size indicated by
 * the SLB entry.  This doesn't handle mixed pagesize segments yet.
 */
static int decode_pagesize(struct kvmppc_slb *slbe, u64 r)
{
        switch (slbe->base_page_size) {
        case MMU_PAGE_64K:
                if ((r & 0xf000) == 0x1000)
                        return MMU_PAGE_64K;
                break;
        case MMU_PAGE_16M:
                if ((r & 0xff000) == 0)
                        return MMU_PAGE_16M;
                break;
        }
        return -1;
}
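
/*
 * Example (illustrative value): in a 64k-base segment the LP field
 * lives in bits 0xf000 of the second dword, and 0x1000 is the 64k
 * encoding, so r = 0x...1234 decodes to MMU_PAGE_64K; any other LP
 * pattern makes the HPTE unusable for this segment and returns -1.
 */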

static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                      struct kvmppc_pte *gpte, bool data,
                                      bool iswrite)
{
        struct kvmppc_slb *slbe;
        hva_t ptegp;
        u64 pteg[16];
        u64 avpn = 0;
        u64 v, r;
        u64 v_val, v_mask;
        u64 eaddr_mask;
        int i;
        u8 pp, key = 0;
        bool found = false;
        bool second = false;
        int pgsize;
        ulong mp_ea = vcpu->arch.magic_page_ea;

        /* Magic page override */
        if (unlikely(mp_ea) &&
            unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                gpte->eaddr = eaddr;
                gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
                /* the low 12 bits are the offset within the magic page */
                gpte->raddr = vcpu->arch.magic_page_pa | (eaddr & 0xfff);
                gpte->raddr &= KVM_PAM;
                gpte->may_execute = true;
                gpte->may_read = true;
                gpte->may_write = true;
                gpte->page_size = MMU_PAGE_4K;
                gpte->wimg = HPTE_R_M;

                return 0;
        }

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
        if (!slbe)
                goto no_seg_found;

        avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
        v_val = avpn & HPTE_V_AVPN;

        if (slbe->tb)
                v_val |= SLB_VSID_B_1T;
        if (slbe->large)
                v_val |= HPTE_V_LARGE;
        v_val |= HPTE_V_VALID;

        v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
                HPTE_V_SECONDARY;

        pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K;

        mutex_lock(&vcpu->kvm->arch.hpt_mutex);

do_second:
        ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second);
        if (kvm_is_error_hva(ptegp))
                goto no_page_found;

        if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
                printk_ratelimited(KERN_ERR
                        "KVM: Can't copy data from 0x%lx!\n", ptegp);
                goto no_page_found;
        }

        if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
                key = 4;
        else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
                key = 4;

        for (i = 0; i < 16; i += 2) {
                u64 pte0 = be64_to_cpu(pteg[i]);
                u64 pte1 = be64_to_cpu(pteg[i + 1]);

                /* Check all relevant fields of 1st dword */
                if ((pte0 & v_mask) == v_val) {
                        /* If large page bit is set, check pgsize encoding */
                        if (slbe->large &&
                            (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                                pgsize = decode_pagesize(slbe, pte1);
                                if (pgsize < 0)
                                        continue;
                        }
                        found = true;
                        break;
                }
        }

        if (!found) {
                if (second)
                        goto no_page_found;
                v_val |= HPTE_V_SECONDARY;
                second = true;
                goto do_second;
        }

        v = be64_to_cpu(pteg[i]);
        r = be64_to_cpu(pteg[i + 1]);
        pp = (r & HPTE_R_PP) | key;
        if (r & HPTE_R_PP0)
                pp |= 8;

        gpte->eaddr = eaddr;
        gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);

        eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1;
        gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
        gpte->page_size = pgsize;
        gpte->may_execute = ((r & HPTE_R_N) ? false : true);
        if (unlikely(vcpu->arch.disable_kernel_nx) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR))
                gpte->may_execute = true;
        gpte->may_read = false;
        gpte->may_write = false;
        gpte->wimg = r & HPTE_R_WIMG;

        switch (pp) {
        case 0:
        case 1:
        case 2:
        case 6:
                gpte->may_write = true;
                /* fall through */
        case 3:
        case 5:
        case 7:
        case 10:
                gpte->may_read = true;
                break;
        }
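
        /*
         * The cases above encode the HPTE protection table, with
         * key = 4 folded in for Ks/Kp-protected accesses and 8 for
         * HPTE_R_PP0:
         *   key 0: pp 0/1/2 -> read/write, pp 3 -> read-only
         *   key 4: pp 6 -> read/write, pp 5/7 -> read-only,
         *          pp 4 -> no access
         *   pp 10 (PP0 set) -> read-only
         * Any other combination leaves both may_read and may_write
         * false.
         */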

        dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
                "-> 0x%lx\n",
                eaddr, avpn, gpte->vpage, gpte->raddr);

        /*
         * Update PTE R and C bits, so the guest's swapper knows we used the
         * page.
         */
        if (gpte->may_read && !(r & HPTE_R_R)) {
                /*
                 * Set the accessed flag.
                 * We have to write this back with a single byte write
                 * because another vcpu may be accessing this on
                 * non-PAPR platforms such as mac99, and this is
                 * what real hardware does.
                 */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_R;
                put_user(r >> 8, addr + 6);
        }
        if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
                /* Set the dirty flag, again with a single byte write */
                char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
                r |= HPTE_R_C;
                put_user(r, addr + 7);
        }

        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);

        if (!gpte->may_read || (iswrite && !gpte->may_write))
                return -EPERM;
        return 0;

no_page_found:
        mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
        return -ENOENT;

no_seg_found:
        dprintk("KVM MMU: Trigger segment fault\n");
        return -EINVAL;
}

static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;

        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;

        /* valid SLB indices are 0 .. slb_nr - 1 */
        if (slb_nr >= vcpu->arch.slb_nr)
                return;

        slbe = &vcpu->arch.slb[slb_nr];

        slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
        slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
        slbe->esid  = slbe->tb ? esid_1t : esid;
        slbe->vsid  = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
        slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
        slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
        slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
        slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
        slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

        slbe->base_page_size = MMU_PAGE_4K;
        if (slbe->large) {
                if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) {
                        switch (rs & SLB_VSID_LP) {
                        case SLB_VSID_LP_00:
                                slbe->base_page_size = MMU_PAGE_16M;
                                break;
                        case SLB_VSID_LP_01:
                                slbe->base_page_size = MMU_PAGE_64K;
                                break;
                        }
                } else
                        slbe->base_page_size = MMU_PAGE_16M;
        }

        slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
        slbe->origv = rs;

        /* Map the new segment */
        kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}
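
/*
 * Worked example (illustrative values): rb = 0xC000000008000001 has
 * SLB_ESID_V (bit 27) set, selects SLB index 1 and carries
 * esid 0xC00000000; rs = 0x0000000000123400 gives vsid 0x123
 * (rs >> 12 for a 256MB segment) with SLB_VSID_KP (0x400) set.
 */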

static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->orige;
}

static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
        struct kvmppc_slb *slbe;

        if (slb_nr >= vcpu->arch.slb_nr)
                return 0;

        slbe = &vcpu->arch.slb[slb_nr];

        return slbe->origv;
}

static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
        struct kvmppc_slb *slbe;
        u64 seg_size;

        dprintk("KVM MMU: slbie(0x%llx)\n", ea);

        slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

        if (!slbe)
                return;

        dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

        slbe->valid = false;
        slbe->orige = 0;
        slbe->origv = 0;

        seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
        kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}

static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
        int i;

        dprintk("KVM MMU: slbia()\n");

        for (i = 1; i < vcpu->arch.slb_nr; i++) {
                vcpu->arch.slb[i].valid = false;
                vcpu->arch.slb[i].orige = 0;
                vcpu->arch.slb[i].origv = 0;
        }

        if (kvmppc_get_msr(vcpu) & MSR_IR) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
        }
}
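
/*
 * Note the loop starts at 1: like the hardware instruction (which
 * invalidates all SLB entries except entry 0), slbia leaves entry 0
 * alone, so the segment it maps survives until an explicit slbmte
 * replaces it.
 */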

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
                                        ulong value)
{
        u64 rb = 0, rs = 0;

        /*
         * According to Book3 2.01 mtsrin is implemented as:
         *
         * The SLB entry specified by (RB)32:35 is loaded from register
         * RS, as follows.
         *
         * SLBE Bit     Source                  SLB Field
         *
         * 0:31         0x0000_0000             ESID-0:31
         * 32:35        (RB)32:35               ESID-32:35
         * 36           0b1                     V
         * 37:61        0x00_0000|| 0b0         VSID-0:24
         * 62:88        (RS)37:63               VSID-25:51
         * 89:91        (RS)33:35               Ks Kp N
         * 92           (RS)36                  L ((RS)36 must be 0b0)
         * 93           0b0                     C
         */

        dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
        rb |= 1 << 27;
        /* Index = ESID */
        rb |= srnum;

        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
        rs |= ((value >> 28) & 0x7) << 9;

        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}
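
/*
 * Worked example (illustrative values): srnum = 3, value = 0x2000abcd
 * builds rb = (3 << 28) | (1 << 27) | 3 (ESID 3, valid, SLB index 3)
 * and rs = (0x000abcd << 12) | (2 << 9), i.e. VSID 0xabcd with the
 * Kp flag - the 32-bit segment register is emulated by faking the
 * equivalent slbmte.
 */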

static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
                                       bool large)
{
        u64 mask = 0xFFFFFFFFFULL;
        long i;
        struct kvm_vcpu *v;

        dprintk("KVM MMU: tlbie(0x%lx)\n", va);

        /*
         * The tlbie instruction changed behaviour starting with
         * POWER6.  POWER6 and later don't have the large page flag
         * in the instruction but in the RB value, along with bits
         * indicating page and segment sizes.
         */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) {
                /* POWER6 or later */
                if (va & 1) {           /* L bit */
                        if ((va & 0xf000) == 0x1000)
                                mask = 0xFFFFFFFF0ULL;  /* 64k page */
                        else
                                mask = 0xFFFFFF000ULL;  /* 16M page */
                }
        } else {
                /* older processors, e.g. PPC970 */
                if (large)
                        mask = 0xFFFFFF000ULL;
        }
        /* flush this VA on all vcpus */
        kvm_for_each_vcpu(i, v, vcpu->kvm)
                kvmppc_mmu_pte_vflush(v, va >> 12, mask);
}
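
/*
 * The mask selects which bits of the shadow page index (va >> 12)
 * must match: 0xFFFFFFFFF compares all of them (4k page),
 * 0xFFFFFFFF0 ignores the low 4 bits (a 64k page covers 16 shadow
 * pages) and 0xFFFFFF000 ignores the low 12 (16M covers 4096).
 */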

#ifdef CONFIG_PPC_64K_PAGES
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
        ulong mp_ea = vcpu->arch.magic_page_ea;

        return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
                (mp_ea >> SID_SHIFT) == esid;
}
#endif

static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                             u64 *vsid)
{
        ulong ea = esid << SID_SHIFT;
        struct kvmppc_slb *slb;
        u64 gvsid = esid;
        ulong mp_ea = vcpu->arch.magic_page_ea;
        int pagesize = MMU_PAGE_64K;
        u64 msr = kvmppc_get_msr(vcpu);

        if (msr & (MSR_DR|MSR_IR)) {
                slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
                if (slb) {
                        gvsid = slb->vsid;
                        pagesize = slb->base_page_size;
                        if (slb->tb) {
                                gvsid <<= SID_SHIFT_1T - SID_SHIFT;
                                gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
                                gvsid |= VSID_1T;
                        }
                }
        }

        switch (msr & (MSR_DR|MSR_IR)) {
        case 0:
                gvsid = VSID_REAL | esid;
                break;
        case MSR_IR:
                gvsid |= VSID_REAL_IR;
                break;
        case MSR_DR:
                gvsid |= VSID_REAL_DR;
                break;
        case MSR_DR|MSR_IR:
                if (!slb)
                        goto no_slb;

                break;
        default:
                BUG();
                break;
        }

#ifdef CONFIG_PPC_64K_PAGES
        /*
         * Mark this as a 64k segment if the host is using
         * 64k pages, the host MMU supports 64k pages and
         * the guest segment page size is >= 64k,
         * but not if this segment contains the magic page.
         */
        if (pagesize >= MMU_PAGE_64K &&
            mmu_psize_defs[MMU_PAGE_64K].shift &&
            !segment_contains_magic_page(vcpu, esid))
                gvsid |= VSID_64K;
#endif

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        *vsid = gvsid;
        return 0;

no_slb:
        /* Catch magic page case */
        if (unlikely(mp_ea) &&
            unlikely(esid == (mp_ea >> SID_SHIFT)) &&
            !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                *vsid = VSID_REAL | esid;
                return 0;
        }

        return -EINVAL;
}
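
/*
 * The VSID produced here is the guest VSID tagged with the current
 * translation regime: VSID_REAL for real-mode accesses, VSID_REAL_IR
 * and VSID_REAL_DR for the split IR/DR modes, VSID_PR for problem
 * state and (on 64k-page hosts) VSID_64K for the page size, so one
 * guest segment maps to a distinct shadow segment per context.
 */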

static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
        return (to_book3s(vcpu)->hid[5] & 0x80);
}

void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        mmu->mfsrin = NULL;
        mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
        mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
        mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
        mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
        mmu->slbie = kvmppc_mmu_book3s_64_slbie;
        mmu->slbia = kvmppc_mmu_book3s_64_slbia;
        mmu->xlate = kvmppc_mmu_book3s_64_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
        mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
        mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
        mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
        mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}