linux/arch/powerpc/kvm/e500_mmu.c
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

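/*
 * TLB0 is set-associative; when the guest needs a victim slot we hand
 * out ways in simple round-robin order, wrapping when we run out.
 */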
static inline unsigned int gtlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->gtlb_nv[0]++;
        if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
                vcpu_e500->gtlb_nv[0] = 0;

        return victim;
}

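/*
 * Compute the index of the first entry of the TLB0 set that maps "addr".
 * The set is selected by the low-order bits of the virtual page number;
 * entries are laid out set by set, "ways" entries per set.
 *
 * Worked example (assuming 4 KiB pages): with 128 sets and 4 ways,
 * addr = 0x10003000 has VPN 0x10003, selects set 0x10003 & 127 = 3,
 * so set_base = 3 * 4 = 12.
 */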
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
        int set_base;

        set_base = (addr >> PAGE_SHIFT) & (sets - 1);
        set_base *= ways;

        return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
        return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                             vcpu_e500->gtlb_params[0].ways);
}

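/*
 * Decode MAS0[ESEL] into an index into the guest TLB array: for TLB0 it
 * selects a way within the set addressed by MAS2[EPN]; for TLB1 it is
 * the entry number itself.
 */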
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel = get_tlb_esel_bit(vcpu);

        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
                esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }

        return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int size = vcpu_e500->gtlb_params[tlbsel].entries;
        unsigned int set_base, offset;
        int i;

        if (tlbsel == 0) {
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
                /*
                 * TLB1 is fully associative, but we can skip the scan
                 * entirely when eaddr lies outside the address range
                 * covered by its valid entries.
                 */
                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
                                eaddr > vcpu_e500->tlb1_max_eaddr)
                        return -1;
                set_base = 0;
        }

        offset = vcpu_e500->gtlb_offset[tlbsel];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + set_base + i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                /* as == -1 means "match either address space" */
                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return set_base + i;
        }

        return -1;
}

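/*
 * Load the MAS registers with the values a real TLB miss would leave
 * behind, so the guest's miss handler can tlbwe the translation straight
 * in: MAS0 names a suggested victim slot, MAS1/MAS2 are seeded from the
 * MAS4 defaults (TLBSELD, TSIZED, attribute bits), and MAS6 reflects the
 * faulting PID and address space.
 */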
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, tsized;
        int tlbsel;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
}

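/*
 * Rescan all valid TLB1 entries to rebuild the cached
 * [tlb1_min_eaddr, tlb1_max_eaddr] range used to short-circuit TLB1
 * lookups.
 */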
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int size = vcpu_e500->gtlb_params[1].entries;
        unsigned int offset;
        gva_t eaddr;
        int i;

        vcpu_e500->tlb1_min_eaddr = ~0UL;
        vcpu_e500->tlb1_max_eaddr = 0;
        offset = vcpu_e500->gtlb_offset[1];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + i];

                if (!get_tlb_v(tlbe))
                        continue;

                eaddr = get_tlb_eaddr(tlbe);
                vcpu_e500->tlb1_min_eaddr =
                                min(vcpu_e500->tlb1_min_eaddr, eaddr);

                eaddr = get_tlb_end(tlbe);
                vcpu_e500->tlb1_max_eaddr =
                                max(vcpu_e500->tlb1_max_eaddr, eaddr);
        }
}

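/*
 * An entry being removed or overwritten only forces a full rescan if it
 * bounded the cached TLB1 address range.
 */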
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        return vcpu_e500->tlb1_min_eaddr == start ||
                        vcpu_e500->tlb1_max_eaddr == end;
}

/* This function is supposed to be called when adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (!get_tlb_v(gtlbe))
                return;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

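/*
 * Clear a guest TLB entry (by zeroing MAS1, which includes the valid
 * bit) unless the entry is IPROT-protected.  Returns -1 if the entry
 * was protected and left untouched.
 */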
static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                kvmppc_recalc_tlb1map_range(vcpu_e500);

        gtlbe->mas1 = 0;

        return 0;
}

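/*
 * Emulate a write to MMUCSR0: the TLB0FI/TLB1FI bits flash-invalidate
 * the corresponding guest TLB array (IPROT-protected entries excepted,
 * as on real hardware).
 */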
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

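/*
 * Emulate tlbivax: bit 2 of the effective address (the invalidate-all
 * flag) requests an invalidate-all of the selected TLB; otherwise we
 * look up and knock out the single entry matching the current PID.
 */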
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;

        ia = (ea >> 2) & 0x1;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
                     esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

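/*
 * tlbilx helpers: tlbilx_all invalidates every entry of one TLB when
 * type == 0, or only those whose TID matches "pid" otherwise;
 * tlbilx_one invalidates the single entry translating "ea" for "pid".
 * kvmppc_e500_emul_tlbilx below dispatches on the instruction's T field.
 */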
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                       int pid, int type)
{
        struct kvm_book3e_206_tlb_entry *tlbe;
        int tid, esel;

        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
                tlbe = get_entry(vcpu_e500, tlbsel, esel);
                tid = get_tlb_tid(tlbe);
                if (type == 0 || tid == pid) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                }
        }
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
                       gva_t ea)
{
        int tlbsel, esel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
                if (esel >= 0) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                        break;
                }
        }
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int pid = get_cur_spid(vcpu);

        if (type == 0 || type == 1) {
                tlbilx_all(vcpu_e500, 0, pid, type);
                tlbilx_all(vcpu_e500, 1, pid, type);
        } else if (type == 3) {
                tlbilx_one(vcpu_e500, pid, ea);
        }

        return EMULATE_DONE;
}

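/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0 back into
 * the MAS registers, refreshing MAS0[NV] with the current next-victim
 * hint.
 */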
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
        vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
        vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

        return EMULATE_DONE;
}

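/*
 * Emulate tlbsx: search both guest TLBs for "ea" under the PID/AS given
 * in MAS6.  On a hit the MAS registers describe the matching entry; on
 * a miss they are preloaded with a suggested victim slot and the MAS4
 * defaults, just as a miss exception would leave them.
 */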
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu);
        unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
                        break;
                }
        }

        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 = gtlbe->mas1;
                vcpu->arch.shared->mas2 = gtlbe->mas2;
                vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;

                /* since we only have two TLBs, only lower bit is used. */
                tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
                        | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                /* MAS6[SAS] names the searched address space -> MAS1[TS] */
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
                        | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                                           MAS2_ATTRIB_MASK;
                vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                                             MAS3_U2 | MAS3_U3;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}

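/*
 * Emulate tlbwe: commit the MAS registers into the guest TLB entry
 * selected by MAS0, invalidate any host shadow mappings for the entry
 * being overwritten, and eagerly map the new translation when it is
 * safe to do so.
 */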
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
        int recal = 0;
        int idx;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        if (get_tlb_v(gtlbe)) {
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                if ((tlbsel == 1) &&
                        kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                        recal = 1;
        }

        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
        if (!(vcpu->arch.shared->msr & MSR_CM))
                gtlbe->mas2 &= 0xffffffffUL;
        gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);

        if (tlbsel == 1) {
                /*
                 * If a valid tlb1 entry was overwritten, recalculate the
                 * min/max TLB1 address range; otherwise just grow the
                 * range to cover the new entry.
                 */
                if (recal)
                        kvmppc_recalc_tlb1map_range(vcpu_e500);
                else
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        /* If the new entry is host-safe, set up its shadow mapping eagerly. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr = get_tlb_eaddr(gtlbe);
                u64 raddr = get_tlb_raddr(gtlbe);

                if (tlbsel == 0) {
                        /* TLB0 entries are always 4K on e500 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
                }

                /* Premap the faulting page */
                kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
        }

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

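/*
 * Look up "eaddr" in both guest TLBs, returning a combined index that
 * encodes (tlbsel, esel), or -1 if no entry matches.
 */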
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                  gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
        int index;
        gva_t eaddr;
        u8 pid;
        u8 as;

        eaddr = tr->linear_address;
        pid = (tr->linear_address >> 32) & 0xff;
        as = (tr->linear_address >> 40) & 0x1;

        index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
        if (index < 0) {
                tr->valid = 0;
                return 0;
        }

        tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
        /* XXX what does "writeable" and "usermode" even mean? */
        tr->valid = 1;

        return 0;
}


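/*
 * Instruction/data TLB lookup and miss helpers: the address space bit
 * comes from MSR[IS] or MSR[DS], the PID from the current PID register.
 */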
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

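/*
 * Translate a guest virtual address through a previously found guest
 * TLB entry: keep the offset bits covered by the page mask and splice
 * them onto the entry's real address.  For example, an entry mapping a
 * 64 KiB page at raddr 0xA0000000 translates eaddr ...2345 to
 * 0xA0002345.
 */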
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        u64 pgmask;

        gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
        pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

/*****************************************/

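/*
 * Tear down the guest TLB arrays.  If the backing store is user memory
 * shared via KVM_CONFIG_TLB, unmap it and release the pinned pages
 * (marking them dirty); otherwise it was kmalloc'ed and is simply
 * freed.
 */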
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i;

        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
        kfree(vcpu_e500->g2h_tlb1_map);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);

        if (vcpu_e500->shared_tlb_pages) {
                vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                                          PAGE_SIZE)));

                for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
                        set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
                        put_page(vcpu_e500->shared_tlb_pages[i]);
                }

                vcpu_e500->num_shared_tlb_pages = 0;

                kfree(vcpu_e500->shared_tlb_pages);
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
        }

        vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.mas0 = vcpu->arch.shared->mas0;
        sregs->u.e.mas1 = vcpu->arch.shared->mas1;
        sregs->u.e.mas2 = vcpu->arch.shared->mas2;
        sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
        sregs->u.e.mas4 = vcpu->arch.shared->mas4;
        sregs->u.e.mas6 = vcpu->arch.shared->mas6;

        sregs->u.e.mmucfg = vcpu->arch.mmucfg;
        sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
        sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
        sregs->u.e.tlbcfg[2] = 0;
        sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
                vcpu->arch.shared->mas0 = sregs->u.e.mas0;
                vcpu->arch.shared->mas1 = sregs->u.e.mas1;
                vcpu->arch.shared->mas2 = sregs->u.e.mas2;
                vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
                vcpu->arch.shared->mas4 = sregs->u.e.mas4;
                vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }

        return 0;
}

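/*
 * ONE_REG access to the MAS registers and the MMU configuration.  The
 * config registers (MMUCFG, EPTCFG, TLBnCFG, TLBnPS) are effectively
 * read-only here: writes must match the values KVM already presents.
 */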
int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                                union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                *val = get_reg_val(id, vcpu->arch.shared->mas0);
                break;
        case KVM_REG_PPC_MAS1:
                *val = get_reg_val(id, vcpu->arch.shared->mas1);
                break;
        case KVM_REG_PPC_MAS2:
                *val = get_reg_val(id, vcpu->arch.shared->mas2);
                break;
        case KVM_REG_PPC_MAS7_3:
                *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
                break;
        case KVM_REG_PPC_MAS4:
                *val = get_reg_val(id, vcpu->arch.shared->mas4);
                break;
        case KVM_REG_PPC_MAS6:
                *val = get_reg_val(id, vcpu->arch.shared->mas6);
                break;
        case KVM_REG_PPC_MMUCFG:
                *val = get_reg_val(id, vcpu->arch.mmucfg);
                break;
        case KVM_REG_PPC_EPTCFG:
                *val = get_reg_val(id, vcpu->arch.eptcfg);
                break;
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG:
                i = id - KVM_REG_PPC_TLB0CFG;
                *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
                break;
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS:
                i = id - KVM_REG_PPC_TLB0PS;
                *val = get_reg_val(id, vcpu->arch.tlbps[i]);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                               union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                vcpu->arch.shared->mas0 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS1:
                vcpu->arch.shared->mas1 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS2:
                vcpu->arch.shared->mas2 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS7_3:
                vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS4:
                vcpu->arch.shared->mas4 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS6:
                vcpu->arch.shared->mas6 = set_reg_val(id, *val);
                break;
        /* Only allow MMU registers to be set to the config supported by KVM */
        case KVM_REG_PPC_MMUCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.mmucfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_EPTCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.eptcfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG: {
                /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0CFG;
                if (reg != vcpu->arch.tlbcfg[i])
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS: {
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0PS;
                if (reg != vcpu->arch.tlbps[i])
                        r = -EINVAL;
                break;
        }
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

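/*
 * Mirror the SW_TLB geometry into the emulated TLBnCFG registers.  A
 * TLB0 size above 2048 is not advertised in TLBnCFG[N_ENTRY] and leaves
 * the field zero.
 */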
static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
                struct kvm_book3e_206_tlb_params *params)
{
        vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params->tlb_sizes[0] <= 2048)
                vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
        vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
        vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
        return 0;
}

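/*
 * KVM_CONFIG_TLB: switch the guest TLB backing store to an array in
 * user memory, shared between userspace and KVM.  The geometry is
 * validated (TLB1 fully associative with at most 64 entries, TLB0 ways
 * and sets powers of two), the user pages are pinned and vmap'ed, and
 * the old arrays are released only after every new allocation has
 * succeeded.
 */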
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_params params;
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
        u64 *g2h_bitmap = NULL;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;

        if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
                           sizeof(params)))
                return -EFAULT;

        if (params.tlb_sizes[1] > 64)
                return -EINVAL;
        if (params.tlb_ways[1] != params.tlb_sizes[1])
                return -EINVAL;
        if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
                return -EINVAL;
        if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
                return -EINVAL;

        if (!is_power_of_2(params.tlb_ways[0]))
                return -EINVAL;

        sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
        if (!is_power_of_2(sets))
                return -EINVAL;

        array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
        array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

        if (cfg->array_len < array_len)
                return -EINVAL;

        num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
                    cfg->array / PAGE_SIZE;
        pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
        if (ret < 0)
                goto err_pages;

        if (ret != num_pages) {
                num_pages = ret;
                ret = -EFAULT;
                goto err_put_page;
        }

        virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
        if (!virt) {
                ret = -ENOMEM;
                goto err_put_page;
        }

        privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
                           GFP_KERNEL);
        privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
                           GFP_KERNEL);

        if (!privs[0] || !privs[1]) {
                ret = -ENOMEM;
                goto err_privs;
        }

        g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
                             GFP_KERNEL);
        if (!g2h_bitmap) {
                ret = -ENOMEM;
                goto err_privs;
        }

        free_gtlb(vcpu_e500);

        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
        vcpu_e500->g2h_tlb1_map = g2h_bitmap;

        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));

        vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
        vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

        /* Update vcpu's MMU geometry based on SW_TLB input */
        vcpu_mmu_geometry_update(vcpu, &params);

        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;

        vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
        vcpu_e500->gtlb_params[0].sets = sets;

        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;

err_privs:
        kfree(privs[0]);
        kfree(privs[1]);

err_put_page:
        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);

err_pages:
        kfree(pages);
        return ret;
}

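/*
 * KVM_DIRTY_TLB: userspace modified the shared TLB array directly, so
 * recompute the TLB1 address range and drop all host shadow mappings.
 */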
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_recalc_tlb1map_range(vcpu_e500);
        kvmppc_core_flush_tlb(vcpu);
        return 0;
}

/* Vcpu's MMU default configuration */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
                       struct kvmppc_e500_tlb_params *params)
{
        /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
        vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

        /* Initialize TLBnCFG fields with host values and SW_TLB geometry */
        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[0] |= params[0].entries;
        vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params[1].entries;
        vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

        if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
                vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
                vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

                vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

                /* Guest mmu emulation currently doesn't handle E.PT */
                vcpu->arch.eptcfg = 0;
                vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
                vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
        }

        return 0;
}

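/*
 * Default guest TLB setup: a kmalloc'ed entry array with the static
 * KVM_E500 geometry, used until (and unless) userspace reconfigures it
 * via KVM_CONFIG_TLB.
 */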
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
        int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
        int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

        if (e500_mmu_host_init(vcpu_e500))
                goto err;

        vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
        vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

        vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
        vcpu_e500->gtlb_params[0].sets =
                KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

        vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
        vcpu_e500->gtlb_params[1].sets = 1;

        vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
        if (!vcpu_e500->gtlb_arch)
                return -ENOMEM;

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

        vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_priv) *
                                          vcpu_e500->gtlb_params[0].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[0])
                goto err;

        vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_priv) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[1])
                goto err;

        vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
                goto err;

        vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;

err:
        free_gtlb(vcpu_e500);
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        free_gtlb(vcpu_e500);
        e500_mmu_host_uninit(vcpu_e500);
}