linux/arch/powerpc/kvm/e500_tlb.c
/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"

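/*
 * Host TLB1 is shared between host-reserved CAM mappings (entries below
 * tlbcam_index) and slots usable for guest shadow entries.  Shadow esel
 * 0 maps to the highest host entry and subsequent esels count downward,
 * so the two regions grow toward each other.  As an illustration (the
 * numbers are assumed, not fixed): with tlb1_entry_num == 16 and
 * tlbcam_index == 4, shadow esels 0..11 occupy host entries 15..4.
 */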
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *tlbe;
        int i, tlbsel;

        printk("| %8s | %8s | %8s | %8s | %8s |\n",
                        "nr", "mas1", "mas2", "mas3", "mas7");

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Guest TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                printk("Shadow TLB%d:\n", tlbsel);
                for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
                        tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
                        if (tlbe->mas1 & MAS1_VALID)
                                printk(" S[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
                                        tlbsel, i, tlbe->mas1, tlbe->mas2,
                                        tlbe->mas3, tlbe->mas7);
                }
        }
}

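/*
 * Round-robin victim selection for guest TLB0: guest_tlb_nv[0] cycles
 * through the KVM_E500_TLB0_WAY_NUM ways of a set.  The counter is also
 * reported to the guest via MAS0[NV] on a TLB miss, mirroring the
 * next-victim hint that real hardware provides.
 */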
static inline unsigned int tlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[0]++;
        if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
                vcpu_e500->guest_tlb_nv[0] = 0;

        return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
        return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
        return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

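/*
 * The guest runs in host user mode, so when the guest is in supervisor
 * mode its permissions must be granted through the user permission bits
 * of the shadow entry.  In the MAS3 layout each user permission bit sits
 * one position above its supervisor counterpart, hence the shift by one
 * below.  The host kernel always keeps full supervisor access.
 */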
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }

        return mas3 | E500_TLB_SUPER_PERM_MASK;
}

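/*
 * On SMP hosts the memory-coherence bit (MAS2_M) is forced on for every
 * shadow mapping, presumably so guest memory stays coherent when the
 * vcpu migrates between physical cores.
 */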
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, stlbe->mas2);
        mtspr(SPRN_MAS3, stlbe->mas3);
        mtspr(SPRN_MAS7, stlbe->mas7);
        __asm__ __volatile__ ("tlbwe\n" : : );
}

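/*
 * The MAS registers are per-core state shared with the rest of the
 * kernel, so interrupts are disabled across the mtspr/tlbwe sequence.
 * For TLB1, MAS0 is saved and restored around selecting the host entry;
 * the esel is translated with to_htlb1_esel() because shadow TLB1 slots
 * are allocated downward from the top of the host TLB1.
 */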
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        local_irq_disable();
        if (tlbsel == 0) {
                __write_host_tlbe(stlbe);
        } else {
                unsigned register mas0;

                mas0 = mfspr(SPRN_MAS0);

                mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
                __write_host_tlbe(stlbe);

                mtspr(SPRN_MAS0, mas0);
        }
        local_irq_enable();
}

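/*
 * Repopulate the host TLB1 with all valid shadow entries when the vcpu
 * is scheduled back in, so the guest does not immediately fault on
 * mappings it had established before being scheduled out.
 */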
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int i;
        unsigned register mas0;

        /* Load all valid TLB1 entries to reduce guest TLB miss faults. */
        local_irq_disable();
        mas0 = mfspr(SPRN_MAS0);
        for (i = 0; i < tlb1_max_shadow_size(); i++) {
                struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

                if (get_tlb_v(stlbe)) {
                        mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
                                        | MAS0_ESEL(to_htlb1_esel(i)));
                        __write_host_tlbe(stlbe);
                }
        }
        mtspr(SPRN_MAS0, mas0);
        local_irq_enable();
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
        _tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
                struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return i;
        }

        return -1;
}

static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
        struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

        if (page) {
                vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

                if (get_tlb_v(stlbe)) {
                        if (tlbe_is_writable(stlbe))
                                kvm_release_page_dirty(page);
                        else
                                kvm_release_page_clean(page);
                }
        }
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
        stlbe->mas1 = 0;
        trace_kvm_stlb_inval(index_of(tlbsel, esel));
}

static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, gva_t eend, u32 tid)
{
        unsigned int pid = tid & 0xff;
        unsigned int i;

        /* XXX Replace loop with fancy data structures. */
        for (i = 0; i < tlb1_max_shadow_size(); i++) {
                struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
                unsigned int tid;

                if (!get_tlb_v(stlbe))
                        continue;

                if (eend < get_tlb_eaddr(stlbe))
                        continue;

                if (eaddr > get_tlb_end(stlbe))
                        continue;

                tid = get_tlb_tid(stlbe);
                if (tid && (tid != pid))
                        continue;

                kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
                write_host_tlbe(vcpu_e500, 1, i);
        }
}

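/*
 * Emulate the MAS-register auto-load that hardware performs on a TLB
 * miss: MAS4 supplies the default TLB selector (TLBSELD), PID selector
 * (TIDSELD) and page size (TSIZED), so the guest's subsequent tlbwe is
 * steered at a suitable victim entry without further setup.
 */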
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, pidsel, tsized;
        int tlbsel;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
        pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
        tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

        vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(vcpu_e500->pid[pidsel])
                | MAS1_TSIZE(tsized);
        vcpu_e500->mas2 = (eaddr & MAS2_EPN)
                | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
        vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
        vcpu_e500->mas7 = 0;
}

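/*
 * Install a shadow mapping for a single guest page: pin the backing
 * page with gfn_to_page(), drop the reference held by whatever occupied
 * this shadow slot before, and build a host TLBE for the new page.  The
 * reference is kept until kvmppc_e500_shadow_release() so the page
 * cannot go away while the host TLB may still map it.
 */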
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
        struct page *new_page;
        struct tlbe *stlbe;
        hpa_t hpaddr;

        stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

        /* Get reference to new page. */
        new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
        if (is_error_page(new_page)) {
                printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
                                (long)gfn);
                kvm_release_page_clean(new_page);
                return;
        }
        hpaddr = page_to_phys(new_page);

        /* Drop reference to old page. */
        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

        vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

        /* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
                | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN)
                | e500_shadow_mas2_attrib(gtlbe->mas2,
                                vcpu_e500->vcpu.arch.msr & MSR_PR);
        stlbe->mas3 = (hpaddr & MAS3_RPN)
                | e500_shadow_mas3_attrib(gtlbe->mas3,
                                vcpu_e500->vcpu.arch.msr & MSR_PR);
        stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

        trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
                             stlbe->mas3, stlbe->mas7);
}

/* XXX Only map the one-to-one case; for now use TLB0. */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe;

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, tlbsel, esel);

        return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX For both one-to-one and one-to-many; for now use TLB1. */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
        unsigned int victim;

        victim = vcpu_e500->guest_tlb_nv[1]++;

        if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
                vcpu_e500->guest_tlb_nv[1] = 0;

        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

        return victim;
}

/* Invalidate all guest kernel mappings when entering user mode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
        if (usermode) {
                struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
                int i;

                /* XXX Replace loop with fancy data structures. */
                for (i = 0; i < tlb1_max_shadow_size(); i++)
                        kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

                _tlbil_all();
        }
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1) {
                kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
                                get_tlb_end(gtlbe),
                                get_tlb_tid(gtlbe));
        } else {
                kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        gtlbe->mas1 = 0;

        return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        _tlbil_all();

        return EMULATE_DONE;
}

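/*
 * tlbivax emulation.  The effective address operand encodes the
 * request: bit 2 selects "invalidate all", bit 3 selects the TLB, and
 * the page index lives in the high bits.  For example, ea == 0x0000000c
 * flash-invalidates all of guest TLB1, while ea == 0x10001000 targets
 * only the TLB0 entry mapping effective address 0x10001000.
 */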
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;
        gva_t ea;

        ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];

        ia = (ea >> 2) & 0x1;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        _tlbil_all();

        return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct tlbe *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
        vcpu_e500->mas0 &= ~MAS0_NV(~0);
        vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
        vcpu_e500->mas1 = gtlbe->mas1;
        vcpu_e500->mas2 = gtlbe->mas2;
        vcpu_e500->mas3 = gtlbe->mas3;
        vcpu_e500->mas7 = gtlbe->mas7;

        return EMULATE_DONE;
}

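/*
 * tlbsx emulation: search both guest TLBs for an entry matching the
 * effective address in rb under the PID and address space from MAS6.
 * On a hit the MAS registers are loaded from the matching entry, as on
 * hardware; on a miss they are primed with MAS4 defaults and a victim
 * slot, mirroring kvmppc_e500_deliver_tlb_miss() above.
 */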
 463
 464int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 465{
 466        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 467        int as = !!get_cur_sas(vcpu_e500);
 468        unsigned int pid = get_cur_spid(vcpu_e500);
 469        int esel, tlbsel;
 470        struct tlbe *gtlbe = NULL;
 471        gva_t ea;
 472
 473        ea = vcpu->arch.gpr[rb];
 474
 475        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 476                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
 477                if (esel >= 0) {
 478                        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
 479                        break;
 480                }
 481        }
 482
 483        if (gtlbe) {
 484                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
 485                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
 486                vcpu_e500->mas1 = gtlbe->mas1;
 487                vcpu_e500->mas2 = gtlbe->mas2;
 488                vcpu_e500->mas3 = gtlbe->mas3;
 489                vcpu_e500->mas7 = gtlbe->mas7;
 490        } else {
 491                int victim;
 492
 493                /* since we only have two TLBs, only lower bit is used. */
 494                tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
 495                victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
 496
 497                vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
 498                        | MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
 499                vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
 500                        | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
 501                        | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
 502                vcpu_e500->mas2 &= MAS2_EPN;
 503                vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
 504                vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
 505                vcpu_e500->mas7 = 0;
 506        }
 507
 508        return EMULATE_DONE;
 509}
 510
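/*
 * tlbwe emulation: commit the guest's MAS registers into the selected
 * guest TLB entry.  Shadow state derived from the old entry is
 * invalidated first, and if the new entry is host-safe it is mapped
 * into the shadow TLB right away so the guest does not take an
 * immediate miss on it.
 */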
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        u64 eaddr;
        u64 raddr;
        u32 tid;
        struct tlbe *gtlbe;
        int tlbsel, esel, stlbsel, sesel;

        tlbsel = get_tlb_tlbsel(vcpu_e500);
        esel = get_tlb_esel(vcpu_e500, tlbsel);

        gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

        if (get_tlb_v(gtlbe) && tlbsel == 1) {
                eaddr = get_tlb_eaddr(gtlbe);
                tid = get_tlb_tid(gtlbe);
                kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
                                get_tlb_end(gtlbe), tid);
        }

        gtlbe->mas1 = vcpu_e500->mas1;
        gtlbe->mas2 = vcpu_e500->mas2;
        gtlbe->mas3 = vcpu_e500->mas3;
        gtlbe->mas7 = vcpu_e500->mas7;

        trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
                             gtlbe->mas3, gtlbe->mas7);

        /* Map the new entry into the shadow TLB if it is host-safe. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                switch (tlbsel) {
                case 0:
                        /* TLB0 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

                        stlbsel = 0;
                        sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

                        break;

                case 1:
                        /* TLB1 */
                        eaddr = get_tlb_eaddr(gtlbe);
                        raddr = get_tlb_raddr(gtlbe);

                        /* Create a 4KB mapping on the host.
                         * If the guest wanted a large page,
                         * only the first 4KB is mapped here and the rest
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
                                        raddr >> PAGE_SHIFT, gtlbe);
                        break;

                default:
                        BUG();
                }
                write_host_tlbe(vcpu_e500, stlbsel, sesel);
        }

        return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

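/*
 * Translate a guest effective address to a guest physical address via a
 * previously matched guest TLB entry: take the real page number from
 * the entry and splice in the page offset from eaddr.  For a 256MB
 * page, for instance, get_tlb_bytes() is 0x10000000 and pgmask keeps
 * the low 28 bits of eaddr.
 */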
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe *gtlbe =
                &vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
        u64 pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, i;

        for (tlbsel = 0; tlbsel < 2; tlbsel++)
                for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
                        kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

        /* Discard all guest mappings. */
        _tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                        unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);
        int stlbsel, sesel;

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = esel;
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                struct tlbe *gtlbe
                        = &vcpu_e500->guest_tlb[tlbsel][esel];

                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
                break;
        }

        default:
                BUG();
                break;
        }
        write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct tlbe *tlbe;

        /* Insert large initial mapping for guest. */
        tlbe = &vcpu_e500->guest_tlb[1][0];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
        tlbe->mas2 = 0;
        tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;

        /* 4K map for serial output. Used by kernel wrapper. */
        tlbe = &vcpu_e500->guest_tlb[1][1];
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
        tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

        vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->guest_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[0] == NULL)
                goto err_out;

        vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
        vcpu_e500->shadow_tlb[0] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[0] == NULL)
                goto err_out_guest0;

        vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
        vcpu_e500->guest_tlb[1] =
                kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
        if (vcpu_e500->guest_tlb[1] == NULL)
                goto err_out_shadow0;

        vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
        vcpu_e500->shadow_tlb[1] =
                kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
        if (vcpu_e500->shadow_tlb[1] == NULL)
                goto err_out_guest1;

        vcpu_e500->shadow_pages[0] =
                kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
        if (vcpu_e500->shadow_pages[0] == NULL)
                goto err_out_shadow1;

        vcpu_e500->shadow_pages[1] =
                kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
        if (vcpu_e500->shadow_pages[1] == NULL)
                goto err_out_page0;

        return 0;

err_out_page0:
        kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
        kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
        kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
        kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
        kfree(vcpu_e500->guest_tlb[0]);
err_out:
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->shadow_pages[1]);
        kfree(vcpu_e500->shadow_pages[0]);
        kfree(vcpu_e500->shadow_tlb[1]);
        kfree(vcpu_e500->guest_tlb[1]);
        kfree(vcpu_e500->shadow_tlb[0]);
        kfree(vcpu_e500->guest_tlb[0]);
}