linux/arch/powerpc/kvm/e500.c
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
        unsigned long val;
        struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides the mappings from:
 * (guestAS, guestTID, guestPR) --> ID of physical cpu
 * guestAS      [0..1]
 * guestTID     [0..255]
 * guestPR      [0..1]
 * ID           [1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
        struct id id[2][NUM_TIDS][2];
};
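
/*
 * For example, if a vcpu's table has guest (AS=0, TID=5, PR=1) assigned
 * host shadow id 42 on this core, then id[0][5][1].val == 42 and this
 * core's pcpu_sids.entry[42] points back at that same struct id (the
 * illustrative numbers are arbitrary).  local_sid_lookup() below treats
 * a mapping as valid only while both sides of this handshake agree.
 */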

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
        struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of shadow IDs is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow id and set up a valid sid mapping in the given
 * entry.  A mapping is only valid when the vcpu_id_table and
 * pcpu_id_table entries match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
        unsigned long sid;
        int ret = -1;

        sid = ++(__get_cpu_var(pcpu_last_used_sid));
        if (sid < NUM_TIDS) {
                __get_cpu_var(pcpu_sids).entry[sid] = entry;
                entry->val = sid;
                entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
                ret = sid;
        }

        /*
         * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
         * the caller will invalidate everything and start over.
         *
         * sid > NUM_TIDS indicates a race, which we disable preemption to
         * avoid.
         */
        WARN_ON(sid > NUM_TIDS);

        return ret;
}

/*
 * Check whether the given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know about it.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
        if (entry && entry->val != 0 &&
            __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
            entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
                return entry->val;
        return -1;
}

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
        __get_cpu_var(pcpu_last_used_sid) = 0;
        memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
        return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->idt);
        vcpu_e500->idt = NULL;
}

/* Map guest PIDs to shadow PIDs.
 * We use PID to hold the shadow of the guest's current non-zero PID,
 * and PID1 to hold the shadow of guest PID 0, so that guest TLB entries
 * with TID=0 can be matched at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        preempt_disable();
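        /*
         * The final argument (avoid_recursion = 1) tells
         * kvmppc_e500_get_sid() not to call back into this function when
         * it has to create a new mapping; arch.shadow_pid and
         * arch.shadow_pid1 are updated right here instead.
         */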
        vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu),
                        get_cur_pid(&vcpu_e500->vcpu),
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
                        get_cur_as(&vcpu_e500->vcpu), 0,
                        get_cur_pr(&vcpu_e500->vcpu), 1);
        preempt_enable();
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
                               struct kvmppc_vcpu_e500 *vcpu_e500,
                               int as, int pid, int pr)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;

        BUG_ON(as >= 2);
        BUG_ON(pid >= NUM_TIDS);
        BUG_ON(pr >= 2);

        idt->id[as][pid][pr].val = 0;
        idt->id[as][pid][pr].pentry = NULL;

        /* Update shadow pid when mappings are changed */
        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu, AS, ID, PR) to a physical core shadow id.
 * This function first looks up whether a valid mapping already exists
 * and, if not, creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 unsigned int as, unsigned int gid,
                                 unsigned int pr, int avoid_recursion)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        int sid;

        BUG_ON(as >= 2);
        BUG_ON(gid >= NUM_TIDS);
        BUG_ON(pr >= 2);

        sid = local_sid_lookup(&idt->id[as][gid][pr]);

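        /*
         * No valid mapping on this core: keep trying to set one up.
         * local_sid_setup_one() can only fail once the core has run out
         * of shadow ids; in that case the whole host TLB is flushed and
         * the per-core id space is reset, so the next pass gets sid 1.
         */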
        while (sid <= 0) {
                /* No mapping yet */
                sid = local_sid_setup_one(&idt->id[as][gid][pr]);
                if (sid <= 0) {
                        _tlbil_all();
                        local_sid_destroy_all();
                }

                /* Update shadow pid when mappings are changed */
                if (!avoid_recursion)
                        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }

        return sid;
}

unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
                                      struct kvm_book3e_206_tlb_entry *gtlbe)
{
        return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
                                   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

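/*
 * Called when the guest's PID register is updated (e.g. via SPR
 * emulation).  pid[0] mirrors the guest value, and the shadow PID must
 * be recomputed because it is derived from the current guest PID.
 */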
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (vcpu->arch.pid != pid) {
                vcpu_e500->pid[0] = vcpu->arch.pid = pid;
                kvmppc_e500_recalc_shadow_pid(vcpu_e500);
        }
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
                           struct kvm_book3e_206_tlb_entry *gtlbe)
{
        struct vcpu_id_table *idt = vcpu_e500->idt;
        unsigned int pr, tid, ts;
        int pid;
        u32 val, eaddr;
        unsigned long flags;

        ts = get_tlb_ts(gtlbe);
        tid = get_tlb_tid(gtlbe);

        preempt_disable();

        /* One guest ID may be mapped to two shadow IDs */
        for (pr = 0; pr < 2; pr++) {
                /*
                 * The shadow PID can have a valid mapping on at most one
                 * host CPU.  In the common case, it will be valid on this
                 * CPU, in which case we do a local invalidation of the
                 * specific address.
                 *
                 * If the shadow PID is not valid on the current host CPU,
                 * we invalidate the entire shadow PID.
                 */
                pid = local_sid_lookup(&idt->id[ts][tid][pr]);
                if (pid <= 0) {
                        kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
                        continue;
                }

                /*
                 * The guest is invalidating a 4K entry which is in a PID
                 * that has a valid shadow mapping on this host CPU.  We
                 * search the host TLB to invalidate its shadow TLB entry,
                 * similar to __tlbil_va except that we need to look in AS1.
                 */
                val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
                eaddr = get_tlb_eaddr(gtlbe);

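                /*
                 * MAS6[SPID] selects the shadow PID to search with, and
                 * MAS6[SAS] = 1 makes tlbsx search address space 1, where
                 * the guest's shadow translations live.  tlbsx loads the
                 * matching entry (if any) into the MAS registers; clearing
                 * MAS1[V] and writing it back with tlbwe invalidates it.
                 */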
                local_irq_save(flags);

                mtspr(SPRN_MAS6, val);
                asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
                val = mfspr(SPRN_MAS1);
                if (val & MAS1_VALID) {
                        mtspr(SPRN_MAS1, val & ~MAS1_VALID);
                        asm volatile("tlbwe");
                }

                local_irq_restore(flags);
        }

        preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kvmppc_e500_id_table_reset_all(vcpu_e500);
}

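/*
 * The shadow PID encodes the guest's current AS and PR, both of which are
 * taken from the MSR (the IS/DS and PR bits), so an MSR change can require
 * a different shadow PID.
 */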
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
        /* Recalc shadow pid since MSR changes */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_booke_vcpu_load(vcpu, cpu);

        /* The shadow PID mappings may have expired on the local core */
        kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
        if (vcpu->arch.shadow_msr & MSR_SPE)
                kvmppc_vcpu_disable_spe(vcpu);
#endif

        kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
        int r;

        if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
                r = 0;
        else
                r = -ENOTSUPP;

        return r;
}

static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_book3e_206_tlb_entry *tlbe;

        /* Insert large initial mapping for guest. */
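        /*
         * get_entry(vcpu_e500, 1, 0) selects entry 0 of the guest's TLB1;
         * the values below describe a valid 256 MB mapping of guest
         * effective address 0 to guest physical address 0 with full
         * supervisor permissions and all MAS2 attribute bits clear.
         */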
        tlbe = get_entry(vcpu_e500, 1, 0);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
        tlbe->mas2 = 0;
        tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

        /* 4K map for serial output. Used by kernel wrapper. */
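        /*
         * 0xe0004500 is assumed to be the on-chip DUART address used by
         * the boot wrapper; masking with 0xFFFFF000 maps the 4K page that
         * contains it one-to-one, cache-inhibited and guarded.
         */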
        tlbe = get_entry(vcpu_e500, 1, 1);
        tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
        tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        kvmppc_e500_tlb_setup(vcpu_e500);

        /* Registers init */
        vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu_e500->svr = mfspr(SPRN_SVR);

        vcpu->arch.cpu_type = KVM_CPU_E500V2;

        return 0;
}

void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
                               KVM_SREGS_E_PM;
        sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

        sregs->u.e.impl.fsl.features = 0;
        sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
        sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
        sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

        sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
        sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
        sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
        sregs->u.e.ivor_high[3] =
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

        kvmppc_get_sregs_ivor(vcpu, sregs);
        kvmppc_get_sregs_e500_tlb(vcpu, sregs);
}

int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int ret;

        if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
                vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
                vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
                vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
        }

        ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
        if (ret < 0)
                return ret;

        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        if (sregs->u.e.features & KVM_SREGS_E_SPE) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
                        sregs->u.e.ivor_high[0];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
                        sregs->u.e.ivor_high[1];
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
                        sregs->u.e.ivor_high[2];
        }

        if (sregs->u.e.features & KVM_SREGS_E_PM) {
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
                        sregs->u.e.ivor_high[3];
        }

        return kvmppc_set_sregs_ivor(vcpu, sregs);
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                        union kvmppc_one_reg *val)
{
        int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
                       union kvmppc_one_reg *val)
{
        int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
        return r;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvmppc_vcpu_e500 *vcpu_e500;
        struct kvm_vcpu *vcpu;
        int err;

        vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu_e500) {
                err = -ENOMEM;
                goto out;
        }

        vcpu = &vcpu_e500->vcpu;
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
                err = -ENOMEM;
                goto uninit_vcpu;
        }

        err = kvmppc_e500_tlb_init(vcpu_e500);
        if (err)
                goto uninit_id;

        vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!vcpu->arch.shared) {
                err = -ENOMEM;
                goto uninit_tlb;
        }

        return vcpu;

uninit_tlb:
        kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
        kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
        return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        free_page((unsigned long)vcpu->arch.shared);
        kvmppc_e500_tlb_uninit(vcpu_e500);
        kvmppc_e500_id_table_free(vcpu_e500);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
        return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}

static int __init kvmppc_e500_init(void)
{
        int r, i;
        unsigned long ivor[3];
        /* Process remaining handlers above the generic first 16 */
        unsigned long *handler = &kvmppc_booke_handler_addr[16];
        unsigned long handler_len;
        unsigned long max_ivor = 0;

        r = kvmppc_core_check_processor_compat();
        if (r)
                return r;

        r = kvmppc_booke_init();
        if (r)
                return r;

        /* copy extra E500 exception handlers */
        ivor[0] = mfspr(SPRN_IVOR32);
        ivor[1] = mfspr(SPRN_IVOR33);
        ivor[2] = mfspr(SPRN_IVOR34);
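        /*
         * IVOR32-34 hold the offsets of the SPE unavailable, SPE FP data
         * and SPE FP round exceptions on e500v2.  Each extra handler is
         * copied to its IVOR offset inside the kvmppc_booke_handlers
         * area, and max_ivor remembers which one sits highest so that the
         * icache flush below covers everything that was copied.
         */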
        for (i = 0; i < 3; i++) {
                if (ivor[i] > ivor[max_ivor])
                        max_ivor = i;

                handler_len = handler[i + 1] - handler[i];
                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       (void *)handler[i], handler_len);
        }
        handler_len = handler[max_ivor + 1] - handler[max_ivor];
        flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                           ivor[max_ivor] + handler_len);

        return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
}

static void __exit kvmppc_e500_exit(void)
{
        kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);