linux/arch/powerpc/kvm/e500_mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"

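/*
 * Return the next TLB0 way to be replaced, advancing the round-robin
 * next-victim counter and wrapping at the number of ways.
 */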
static inline unsigned int gtlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->gtlb_nv[0]++;
        if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
                vcpu_e500->gtlb_nv[0] = 0;

        return victim;
}

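/*
 * Compute the index of the first entry of the TLB0 set that addr maps
 * to; the flat TLB array stores each set's ways contiguously.
 */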
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
        int set_base;

        set_base = (addr >> PAGE_SHIFT) & (sets - 1);
        set_base *= ways;

        return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
        return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                             vcpu_e500->gtlb_params[0].ways);
}

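/*
 * Convert the architected MAS0[ESEL] into an index into the flat guest
 * TLB array: for TLB0, ESEL selects a way within the set addressed by
 * MAS2[EPN]; for the fully associative TLB1 it indexes entries directly.
 */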
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel = get_tlb_esel_bit(vcpu);

        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
                esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }

        return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int size = vcpu_e500->gtlb_params[tlbsel].entries;
        unsigned int set_base, offset;
        int i;

        if (tlbsel == 0) {
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
                                eaddr > vcpu_e500->tlb1_max_eaddr)
                        return -1;
                set_base = 0;
        }

        offset = vcpu_e500->gtlb_offset[tlbsel];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + set_base + i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return set_base + i;
        }

        return -1;
}

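/*
 * Load the guest's MAS registers with the values hardware would
 * generate for a TLB miss at eaddr, using the defaults in MAS4, so the
 * guest can service the miss with a subsequent tlbwe.
 */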
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                gva_t eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, tsized;
        int tlbsel;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
}

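/*
 * Recompute the cached [tlb1_min_eaddr, tlb1_max_eaddr] range spanning
 * all valid TLB1 entries; kvmppc_e500_tlb_index() uses it to skip TLB1
 * scans for addresses that cannot hit.
 */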
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int size = vcpu_e500->gtlb_params[1].entries;
        unsigned int offset;
        gva_t eaddr;
        int i;

        vcpu_e500->tlb1_min_eaddr = ~0UL;
        vcpu_e500->tlb1_max_eaddr = 0;
        offset = vcpu_e500->gtlb_offset[1];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + i];

                if (!get_tlb_v(tlbe))
                        continue;

                eaddr = get_tlb_eaddr(tlbe);
                vcpu_e500->tlb1_min_eaddr =
                                min(vcpu_e500->tlb1_min_eaddr, eaddr);

                eaddr = get_tlb_end(tlbe);
                vcpu_e500->tlb1_max_eaddr =
                                max(vcpu_e500->tlb1_max_eaddr, eaddr);
        }
}

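/*
 * Return true if gtlbe's mapping defines the cached TLB1 minimum or
 * maximum address, i.e. invalidating it may shrink the cached range.
 */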
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        return vcpu_e500->tlb1_min_eaddr == start ||
                        vcpu_e500->tlb1_max_eaddr == end;
}

/* This function is supposed to be called when adding a new valid tlb entry */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (!get_tlb_v(gtlbe))
                return;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

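/*
 * Invalidate one guest TLB entry by clearing its MAS1 word, unless the
 * entry is protected (IPROT), in which case -1 is returned.
 */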
static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                kvmppc_recalc_tlb1map_range(vcpu_e500);

        gtlbe->mas1 = 0;

        return 0;
}

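/*
 * Emulate a guest write to MMUCSR0: the TLB0FI and TLB1FI bits flash
 * invalidate all non-IPROT entries of the corresponding guest TLB.
 */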
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

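/*
 * Emulate tlbivax.  The operation is encoded in the effective address:
 * one low-order bit requests invalidate-all, another selects the TLB;
 * otherwise the entry matching the EPN and current PID is invalidated.
 */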
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;

        ia = (ea >> 2) & 0x1;

        /* Since we only have two TLBs, only the lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
                     esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

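/*
 * tlbilx helpers: invalidate every entry of one guest TLB (optionally
 * only those whose TID matches pid), or the single entry translating ea.
 */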
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                       int pid, int type)
{
        struct kvm_book3e_206_tlb_entry *tlbe;
        int tid, esel;

        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
                tlbe = get_entry(vcpu_e500, tlbsel, esel);
                tid = get_tlb_tid(tlbe);
                if (type == 0 || tid == pid) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                }
        }
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
                       gva_t ea)
{
        int tlbsel, esel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
                if (esel >= 0) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                        break;
                }
        }
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int pid = get_cur_spid(vcpu);

        if (type == 0 || type == 1) {
                tlbilx_all(vcpu_e500, 0, pid, type);
                tlbilx_all(vcpu_e500, 1, pid, type);
        } else if (type == 3) {
                tlbilx_one(vcpu_e500, pid, ea);
        }

        return EMULATE_DONE;
}

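/*
 * Emulate tlbre: read the entry selected by MAS0[TLBSEL,ESEL] back into
 * the guest's MAS1/MAS2/MAS7_3 and refresh MAS0[NV].
 */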
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
        vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
        vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

        return EMULATE_DONE;
}

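/*
 * Emulate tlbsx: search both guest TLBs for ea with the search PID and
 * address space taken from MAS6.  On a hit, load the MAS registers from
 * the matching entry; on a miss, set them up (per the MAS4 defaults) so
 * a following tlbwe creates a suitable replacement entry.
 */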
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu);
        unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
                        break;
                }
        }

        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 = gtlbe->mas1;
                vcpu->arch.shared->mas2 = gtlbe->mas2;
                vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;

                /* Since we only have two TLBs, only the lower bit is used. */
                tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
                        | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
                        | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                                           MAS2_ATTRIB_MASK;
                vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                                             MAS3_U2 | MAS3_U3;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}

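/*
 * Emulate tlbwe: write the MAS register contents into the selected
 * guest TLB entry.  Any previous mapping is invalidated on the host,
 * the cached TLB1 range is kept up to date, and the new translation is
 * premapped on the host when it is safe to do so.
 */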
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
        int recal = 0;
        int idx;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        if (get_tlb_v(gtlbe)) {
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                if ((tlbsel == 1) &&
                        kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                        recal = 1;
        }

        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
        if (!(vcpu->arch.shared->msr & MSR_CM))
                gtlbe->mas2 &= 0xffffffffUL;
        gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);

        if (tlbsel == 1) {
                /*
                 * If a valid tlb1 entry was overwritten, recalculate the
                 * min/max TLB1 map address range; otherwise there is no
                 * need to rescan the tlb1 array.
                 */
                if (recal)
                        kvmppc_recalc_tlb1map_range(vcpu_e500);
                else
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr = get_tlb_eaddr(gtlbe);
                u64 raddr = get_tlb_raddr(gtlbe);

                if (tlbsel == 0) {
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
                }

                /* Premap the faulting page */
                kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
        }

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                  gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
        int index;
        gva_t eaddr;
        u8 pid;
        u8 as;

        eaddr = tr->linear_address;
        pid = (tr->linear_address >> 32) & 0xff;
        as = (tr->linear_address >> 40) & 0x1;

        index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
        if (index < 0) {
                tr->valid = 0;
                return 0;
        }

        tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
        /* XXX what do "writeable" and "usermode" even mean? */
        tr->valid = 1;

        return 0;
}


int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        u64 pgmask;

        gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
        pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
{
}

/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i;

        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
        kfree(vcpu_e500->g2h_tlb1_map);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);

        if (vcpu_e500->shared_tlb_pages) {
                vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                                          PAGE_SIZE)));

                for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
                        set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
                        put_page(vcpu_e500->shared_tlb_pages[i]);
                }

                vcpu_e500->num_shared_tlb_pages = 0;

                kfree(vcpu_e500->shared_tlb_pages);
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
        }

        vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.mas0 = vcpu->arch.shared->mas0;
        sregs->u.e.mas1 = vcpu->arch.shared->mas1;
        sregs->u.e.mas2 = vcpu->arch.shared->mas2;
        sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
        sregs->u.e.mas4 = vcpu->arch.shared->mas4;
        sregs->u.e.mas6 = vcpu->arch.shared->mas6;

        sregs->u.e.mmucfg = vcpu->arch.mmucfg;
        sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
        sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
        sregs->u.e.tlbcfg[2] = 0;
        sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
                vcpu->arch.shared->mas0 = sregs->u.e.mas0;
                vcpu->arch.shared->mas1 = sregs->u.e.mas1;
                vcpu->arch.shared->mas2 = sregs->u.e.mas2;
                vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
                vcpu->arch.shared->mas4 = sregs->u.e.mas4;
                vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }

        return 0;
}

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                                union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                *val = get_reg_val(id, vcpu->arch.shared->mas0);
                break;
        case KVM_REG_PPC_MAS1:
                *val = get_reg_val(id, vcpu->arch.shared->mas1);
                break;
        case KVM_REG_PPC_MAS2:
                *val = get_reg_val(id, vcpu->arch.shared->mas2);
                break;
        case KVM_REG_PPC_MAS7_3:
                *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
                break;
        case KVM_REG_PPC_MAS4:
                *val = get_reg_val(id, vcpu->arch.shared->mas4);
                break;
        case KVM_REG_PPC_MAS6:
                *val = get_reg_val(id, vcpu->arch.shared->mas6);
                break;
        case KVM_REG_PPC_MMUCFG:
                *val = get_reg_val(id, vcpu->arch.mmucfg);
                break;
        case KVM_REG_PPC_EPTCFG:
                *val = get_reg_val(id, vcpu->arch.eptcfg);
                break;
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG:
                i = id - KVM_REG_PPC_TLB0CFG;
                *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
                break;
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS:
                i = id - KVM_REG_PPC_TLB0PS;
                *val = get_reg_val(id, vcpu->arch.tlbps[i]);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                               union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                vcpu->arch.shared->mas0 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS1:
                vcpu->arch.shared->mas1 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS2:
                vcpu->arch.shared->mas2 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS7_3:
                vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS4:
                vcpu->arch.shared->mas4 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS6:
                vcpu->arch.shared->mas6 = set_reg_val(id, *val);
                break;
        /* Only allow MMU registers to be set to the config supported by KVM */
        case KVM_REG_PPC_MMUCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.mmucfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_EPTCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.eptcfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG: {
                /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0CFG;
                if (reg != vcpu->arch.tlbcfg[i])
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS: {
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0PS;
                if (reg != vcpu->arch.tlbps[i])
                        r = -EINVAL;
                break;
        }
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

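/*
 * Fold the userspace-supplied SW_TLB geometry into the guest-visible
 * TLBnCFG values (N_ENTRY and ASSOC fields).
 */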
static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
                struct kvm_book3e_206_tlb_params *params)
{
        vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params->tlb_sizes[0] <= 2048)
                vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
        vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
        vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
        return 0;
}

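/*
 * Handle KVM_CONFIG_TLB: validate the geometry requested by userspace,
 * map the shared TLB array into kernel space, and switch the vcpu's
 * guest TLB over to it, freeing the previously active layout.
 */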
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_params params;
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
        u64 *g2h_bitmap;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;

        if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
                           sizeof(params)))
                return -EFAULT;

        if (params.tlb_sizes[1] > 64)
                return -EINVAL;
        if (params.tlb_ways[1] != params.tlb_sizes[1])
                return -EINVAL;
        if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
                return -EINVAL;
        if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
                return -EINVAL;

        if (!is_power_of_2(params.tlb_ways[0]))
                return -EINVAL;

        sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
        if (!is_power_of_2(sets))
                return -EINVAL;

        array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
        array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

        if (cfg->array_len < array_len)
                return -EINVAL;

        num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
                    cfg->array / PAGE_SIZE;
        pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
        if (ret < 0)
                goto free_pages;

        if (ret != num_pages) {
                num_pages = ret;
                ret = -EFAULT;
                goto put_pages;
        }

        virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
        if (!virt) {
                ret = -ENOMEM;
                goto put_pages;
        }

        privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
        if (!privs[0]) {
                ret = -ENOMEM;
                goto put_pages;
        }

        privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
        if (!privs[1]) {
                ret = -ENOMEM;
                goto free_privs_first;
        }

        g2h_bitmap = kcalloc(params.tlb_sizes[1],
                             sizeof(*g2h_bitmap),
                             GFP_KERNEL);
        if (!g2h_bitmap) {
                ret = -ENOMEM;
                goto free_privs_second;
        }

        free_gtlb(vcpu_e500);

        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
        vcpu_e500->g2h_tlb1_map = g2h_bitmap;

        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));

        vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
        vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

        /* Update vcpu's MMU geometry based on SW_TLB input */
        vcpu_mmu_geometry_update(vcpu, &params);

        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;

        vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
        vcpu_e500->gtlb_params[0].sets = sets;

        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 free_privs_second:
        kfree(privs[1]);
 free_privs_first:
        kfree(privs[0]);
 put_pages:
        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
 free_pages:
        kfree(pages);
        return ret;
}

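/*
 * Handle KVM_DIRTY_TLB: userspace has modified the shared TLB array
 * directly, so refresh the cached TLB1 range and drop all host shadow
 * mappings.
 */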
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_recalc_tlb1map_range(vcpu_e500);
        kvmppc_core_flush_tlb(vcpu);
        return 0;
}

/* Vcpu's MMU default configuration */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
                       struct kvmppc_e500_tlb_params *params)
{
        /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
        vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

        /* Initialize TLBnCFG fields with host values and SW_TLB geometry */
        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[0] |= params[0].entries;
        vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params[1].entries;
        vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

        if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
                vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
                vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

                vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

                /* Guest mmu emulation currently doesn't handle E.PT */
                vcpu->arch.eptcfg = 0;
                vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
                vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
        }

        return 0;
}

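/*
 * Set up the default guest TLB layout: a set-associative TLB0 and a
 * fully associative TLB1, plus per-entry private data and the
 * guest-to-host TLB1 map.
 */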
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;

        if (e500_mmu_host_init(vcpu_e500))
                goto free_vcpu;

        vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
        vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

        vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
        vcpu_e500->gtlb_params[0].sets =
                KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

        vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
        vcpu_e500->gtlb_params[1].sets = 1;

        vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
                                             KVM_E500_TLB1_SIZE,
                                             sizeof(*vcpu_e500->gtlb_arch),
                                             GFP_KERNEL);
        if (!vcpu_e500->gtlb_arch)
                return -ENOMEM;

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

        vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
                                          sizeof(struct tlbe_ref),
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[0])
                goto free_vcpu;

        vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
                                          sizeof(struct tlbe_ref),
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[1])
                goto free_vcpu;

        vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
                                          sizeof(*vcpu_e500->g2h_tlb1_map),
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
                goto free_vcpu;

        vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 free_vcpu:
        free_gtlb(vcpu_e500);
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        free_gtlb(vcpu_e500);
        e500_mmu_host_uninit(vcpu_e500);
}