linux/arch/powerpc/kvm/e500mc.c
/*
 * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Varun Sethi, <varun.sethi@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/e500.c,
 * by Yu Liu <yu.liu@freescale.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

#include "booke.h"
#include "e500.h"

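/*
 * Inject a pending interrupt into the guest by sending a guest doorbell of
 * the matching class (normal, critical or machine check).  The doorbell tag
 * pairs the guest's LPID with the vcpu_id that vcpu_load programs into GPIR,
 * so only the targeted virtual CPU takes the interrupt.
 */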
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
{
	enum ppc_dbell dbell_type;
	unsigned long tag;

	switch (type) {
	case INT_CLASS_NONCRIT:
		dbell_type = PPC_G_DBELL;
		break;
	case INT_CLASS_CRIT:
		dbell_type = PPC_G_DBELL_CRIT;
		break;
	case INT_CLASS_MC:
		dbell_type = PPC_G_DBELL_MC;
		break;
	default:
		WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
		return;
	}

	preempt_disable();
	tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
	mb();
	ppc_msgsnd(dbell_type, 0, tag);
	preempt_enable();
}

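/*
 * Invalidate the host (shadow) TLB entry backing a single guest TLB entry:
 * MAS5[SGS] and MAS6 select the guest's LPID, PID and address space, tlbsx
 * looks up the matching host entry, and if one is found its valid bit is
 * cleared with tlbwe.
 */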
/* gtlbe must not be mapped by more than one host tlb entry */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	unsigned int tid, ts;
	gva_t eaddr;
	u32 val;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	/* We search the host TLB to invalidate its shadow TLB entry */
	val = (tid << 16) | ts;
	eaddr = get_tlb_eaddr(gtlbe);

	local_irq_save(flags);

	mtspr(SPRN_MAS6, val);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));

	asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
	val = mfspr(SPRN_MAS1);
	if (val & MAS1_VALID) {
		mtspr(SPRN_MAS1, val & ~MAS1_VALID);
		asm volatile("tlbwe");
	}
	mtspr(SPRN_MAS5, 0);
	/* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
	mtspr(SPRN_MAS8, 0);
	isync();

	local_irq_restore(flags);
}

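/*
 * Drop every host TLB entry tagged with this guest's LPID; tlbilxlpid acts
 * on the LPID programmed into MAS5.
 */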
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
	asm volatile("tlbilxlpid");
	mtspr(SPRN_MAS5, 0);
	local_irq_restore(flags);
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	vcpu->arch.pid = pid;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}

/* We use two lpids per VM */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);

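/*
 * Load this vcpu onto the current physical CPU: point the hardware at the
 * guest's LPID, mirror the guest-visible state into the hypervisor guest
 * SPRs (GSPRGn, GSRR0/1, GDEAR, GESR, ...), and flush the LPID's shadow TLB
 * entries if the vcpu last ran on a different physical CPU or if another
 * vcpu sharing the same LPID ran here more recently.
 */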
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_booke_vcpu_load(vcpu, cpu);

	mtspr(SPRN_LPID, get_lpid(vcpu));
	mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
	mtspr(SPRN_GPIR, vcpu->vcpu_id);
	mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
	vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
	vcpu->arch.epsc = vcpu->arch.eplc;
	mtspr(SPRN_EPLC, vcpu->arch.eplc);
	mtspr(SPRN_EPSC, vcpu->arch.epsc);

	mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
	mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
	mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
	mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
	mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
	mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
	mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);

	mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
	mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);

	mtspr(SPRN_GEPR, vcpu->arch.epr);
	mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
	mtspr(SPRN_GESR, vcpu->arch.shared->esr);

	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
	    __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
		kvmppc_e500_tlbil_all(vcpu_e500);
		__this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
	}
}

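/*
 * Unload the vcpu: read back the guest SPR state that the guest may have
 * modified while running, and remember which physical CPU it last ran on.
 */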
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.eplc = mfspr(SPRN_EPLC);
	vcpu->arch.epsc = mfspr(SPRN_EPSC);

	vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
	vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
	vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
	vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);

	vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
	vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);

	vcpu->arch.epr = mfspr(SPRN_GEPR);
	vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
	vcpu->arch.shared->esr = mfspr(SPRN_GESR);

	vcpu->arch.oldpir = mfspr(SPRN_PIR);

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
		r = 0;
	else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
		r = 0;
#ifdef CONFIG_ALTIVEC
	/*
	 * Since guests have the privilege to enable AltiVec, we need AltiVec
	 * support in the host to save/restore their context.
	 * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
	 * because it's cleared in the absence of CONFIG_ALTIVEC!
	 */
	else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
		r = 0;
#endif
	else
		r = -ENOTSUPP;

	return r;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
				 SPRN_EPCR_DUVD;
#ifdef CONFIG_64BIT
	vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
	vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;

	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500MC;

	return 0;
}

static int kvmppc_core_get_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
			       KVM_SREGS_E_PC;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	kvmppc_get_sregs_e500_tlb(vcpu, sregs);

	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
	sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
	sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];

	return kvmppc_get_sregs_ivor(vcpu, sregs);
}

static int kvmppc_core_set_sregs_e500mc(struct kvm_vcpu *vcpu,
					struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PC) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
			sregs->u.e.ivor_high[4];
		vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
			sregs->u.e.ivor_high[5];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
			      union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		*val = get_reg_val(id, vcpu->arch.sprg9);
		break;
	default:
		r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}

static int kvmppc_set_one_reg_e500mc(struct kvm_vcpu *vcpu, u64 id,
			      union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_SPRG9:
		vcpu->arch.sprg9 = set_reg_val(id, *val);
		break;
	default:
		r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_e500mc(struct kvm *kvm,
						       unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}
	vcpu = &vcpu_e500->vcpu;

	/* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
	vcpu->arch.oldpir = 0xffffffff;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_vcpu;

	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_e500mc(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
{
	int lpid;

	lpid = kvmppc_alloc_lpid();
	if (lpid < 0)
		return lpid;

	/*
	 * Use two lpids per VM on cores with two threads like e6500. Use
	 * even numbers to speed up vcpu lpid computation with consecutive
	 * lpids per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and
	 * so on.
	 */
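	/*
	 * Sketch of the intended mapping (the actual get_lpid() helper lives
	 * in e500.h and may differ): on two-thread cores each hardware
	 * thread uses its own lpid, roughly
	 *	vcpu_lpid = kvm->arch.lpid | (hardware thread number & 1);
	 */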
	if (threads_per_core == 2)
		lpid <<= 1;

	kvm->arch.lpid = lpid;
	return 0;
}

static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
	int lpid = kvm->arch.lpid;

	if (threads_per_core == 2)
		lpid >>= 1;

	kvmppc_free_lpid(lpid);
}

static struct kvmppc_ops kvm_ops_e500mc = {
	.get_sregs = kvmppc_core_get_sregs_e500mc,
	.set_sregs = kvmppc_core_set_sregs_e500mc,
	.get_one_reg = kvmppc_get_one_reg_e500mc,
	.set_one_reg = kvmppc_set_one_reg_e500mc,
	.vcpu_load   = kvmppc_core_vcpu_load_e500mc,
	.vcpu_put    = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free   = kvmppc_core_vcpu_free_e500mc,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500mc_init(void)
{
	int r;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/*
	 * Use two lpids per VM on dual threaded processors like e6500
	 * to work around the lack of a tlb write conditional instruction.
	 * Expose half the number of available hardware lpids to the lpid
	 * allocator.
	 */
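	/*
	 * Worked example (assuming 64 hardware lpids): with threads_per_core
	 * == 2 the allocator is told about 32 lpids, and each VM consumes
	 * the hardware lpid pair 2n and 2n+1.
	 */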
	kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
	kvmppc_claim_lpid(0); /* host */

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500mc.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500mc;

err_out:
	return r;
}

static void __exit kvmppc_e500mc_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500mc_init);
module_exit(kvmppc_e500mc_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");