linux/arch/powerpc/kvm/e500_emulate.c
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x_emulate.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/dbell.h>
#include <asm/reg_booke.h>

#include "booke.h"
#include "e500.h"

#define XOP_DCBTLS  166
#define XOP_MSGSND  206
#define XOP_MSGCLR  238
#define XOP_MFTMR   366
#define XOP_TLBIVAX 786
#define XOP_TLBSX   914
#define XOP_TLBRE   946
#define XOP_TLBWE   978
#define XOP_TLBILX  18
#define XOP_EHPRIV  270

#ifdef CONFIG_KVM_E500MC
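/*
 * Translate the doorbell message type encoded in a msgsnd/msgclr operand
 * into the corresponding BookE interrupt priority, or -1 if the message
 * type is not one we emulate.
 */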
static int dbell2prio(ulong param)
{
        int msg = param & PPC_DBELL_TYPE_MASK;
        int prio = -1;

        switch (msg) {
        case PPC_DBELL_TYPE(PPC_DBELL):
                prio = BOOKE_IRQPRIO_DBELL;
                break;
        case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
                prio = BOOKE_IRQPRIO_DBELL_CRIT;
                break;
        default:
                break;
        }

        return prio;
}

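/*
 * msgclr: clear any pending doorbell of the requested type on this vcpu.
 */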
static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
{
        ulong param = vcpu->arch.regs.gpr[rb];
        int prio = dbell2prio(param);

        if (prio < 0)
                return EMULATE_FAIL;

        clear_bit(prio, &vcpu->arch.pending_exceptions);
        return EMULATE_DONE;
}

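/*
 * msgsnd: post a doorbell of the requested type to the vcpu whose PIR
 * matches the operand (or to every vcpu on a broadcast) and kick the
 * targets so they notice the new pending exception.
 */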
static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
{
        ulong param = vcpu->arch.regs.gpr[rb];
        int prio = dbell2prio(param);
        int pir = param & PPC_DBELL_PIR_MASK;
        int i;
        struct kvm_vcpu *cvcpu;

        if (prio < 0)
                return EMULATE_FAIL;

        kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
                int cpir = cvcpu->arch.shared->pir;
                if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
                        set_bit(prio, &cvcpu->arch.pending_exceptions);
                        kvm_vcpu_kick(cvcpu);
                }
        }

        return EMULATE_DONE;
}
#endif

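/*
 * ehpriv: the debug opcode-completer exits to userspace as a KVM_EXIT_DEBUG
 * event at the trapping instruction; any other completer fails emulation.
 */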
static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int inst, int *advance)
{
        int emulated = EMULATE_DONE;

        switch (get_oc(inst)) {
        case EHPRIV_OC_DEBUG:
                run->exit_reason = KVM_EXIT_DEBUG;
                run->debug.arch.address = vcpu->arch.regs.nip;
                run->debug.arch.status = 0;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                emulated = EMULATE_EXIT_USER;
                *advance = 0;
                break;
        default:
                emulated = EMULATE_FAIL;
        }
        return emulated;
}

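/*
 * dcbtls: guests may not lock cache lines, so report every attempt as
 * unsuccessful via L1CSR0[CUL] (cache unable to lock).
 */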
static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        /* Always fail to lock the cache */
        vcpu_e500->l1csr0 |= L1CSR0_CUL;
        return EMULATE_DONE;
}

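/*
 * mftmr: only TMCFG0 is emulated; it reports a single thread per vcpu.
 */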
static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
                                  int rt)
{
        /* Expose one thread per vcpu */
        if (get_tmrn(inst) == TMRN_TMCFG0) {
                kvmppc_set_gpr(vcpu, rt,
                               1 | (1 << TMRN_TMCFG0_NATHRD_SHIFT));
                return EMULATE_DONE;
        }

        return EMULATE_FAIL;
}

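/*
 * Top-level dispatch for e500-specific instruction emulation (all opcode 31
 * extended ops). Anything not handled here falls back to the generic BookE
 * emulator.
 */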
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned int inst, int *advance)
{
        int emulated = EMULATE_DONE;
        int ra = get_ra(inst);
        int rb = get_rb(inst);
        int rt = get_rt(inst);
        gva_t ea;

        switch (get_op(inst)) {
        case 31:
                switch (get_xop(inst)) {

                case XOP_DCBTLS:
                        emulated = kvmppc_e500_emul_dcbtls(vcpu);
                        break;

#ifdef CONFIG_KVM_E500MC
                case XOP_MSGSND:
                        emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
                        break;

                case XOP_MSGCLR:
                        emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
                        break;
#endif

                case XOP_TLBRE:
                        emulated = kvmppc_e500_emul_tlbre(vcpu);
                        break;

                case XOP_TLBWE:
                        emulated = kvmppc_e500_emul_tlbwe(vcpu);
                        break;

                case XOP_TLBSX:
                        ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
                        emulated = kvmppc_e500_emul_tlbsx(vcpu, ea);
                        break;

                case XOP_TLBILX: {
                        int type = rt & 0x3;
                        ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
                        emulated = kvmppc_e500_emul_tlbilx(vcpu, type, ea);
                        break;
                }

                case XOP_TLBIVAX:
                        ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
                        emulated = kvmppc_e500_emul_tlbivax(vcpu, ea);
                        break;

                case XOP_MFTMR:
                        emulated = kvmppc_e500_emul_mftmr(vcpu, inst, rt);
                        break;

                case XOP_EHPRIV:
                        emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
                                                           advance);
                        break;

                default:
                        emulated = EMULATE_FAIL;
                }

                break;

        default:
                emulated = EMULATE_FAIL;
        }

        if (emulated == EMULATE_FAIL)
                emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

        return emulated;
}

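/*
 * Emulate guest writes to e500 SPRs. MAS state is mirrored into the shared
 * page (when not running with BookE HV), and the self-clearing flash
 * invalidate / lock clear bits in L1CSR0/L1CSR1 are masked out of the stored
 * value. Unknown SPRs fall back to the generic BookE mtspr emulation.
 */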
int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int emulated = EMULATE_DONE;

        switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
        case SPRN_PID:
                kvmppc_set_pid(vcpu, spr_val);
                break;
        case SPRN_PID1:
                if (spr_val != 0)
                        return EMULATE_FAIL;
                vcpu_e500->pid[1] = spr_val;
                break;
        case SPRN_PID2:
                if (spr_val != 0)
                        return EMULATE_FAIL;
                vcpu_e500->pid[2] = spr_val;
                break;
        case SPRN_MAS0:
                vcpu->arch.shared->mas0 = spr_val;
                break;
        case SPRN_MAS1:
                vcpu->arch.shared->mas1 = spr_val;
                break;
        case SPRN_MAS2:
                vcpu->arch.shared->mas2 = spr_val;
                break;
        case SPRN_MAS3:
                vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
                vcpu->arch.shared->mas7_3 |= spr_val;
                break;
        case SPRN_MAS4:
                vcpu->arch.shared->mas4 = spr_val;
                break;
        case SPRN_MAS6:
                vcpu->arch.shared->mas6 = spr_val;
                break;
        case SPRN_MAS7:
                vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
                vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
                break;
#endif
        case SPRN_L1CSR0:
                vcpu_e500->l1csr0 = spr_val;
                vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
                break;
        case SPRN_L1CSR1:
                vcpu_e500->l1csr1 = spr_val;
                vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
                break;
        case SPRN_HID0:
                vcpu_e500->hid0 = spr_val;
                break;
        case SPRN_HID1:
                vcpu_e500->hid1 = spr_val;
                break;

        case SPRN_MMUCSR0:
                emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
                                spr_val);
                break;

        case SPRN_PWRMGTCR0:
                /*
                 * Guest relies on host power management configurations
                 * Treat the request as a general store
                 */
                vcpu->arch.pwrmgtcr0 = spr_val;
                break;

        /* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
        case SPRN_IVOR32:
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
                break;
        case SPRN_IVOR33:
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
                break;
        case SPRN_IVOR34:
                vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
                break;
#endif
#ifdef CONFIG_ALTIVEC
        case SPRN_IVOR32:
                vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
                break;
        case SPRN_IVOR33:
                vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
                break;
#endif
        case SPRN_IVOR35:
                vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
                break;
#ifdef CONFIG_KVM_BOOKE_HV
        case SPRN_IVOR36:
                vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
                break;
        case SPRN_IVOR37:
                vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
                break;
#endif
        default:
                emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
        }

        return emulated;
}

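/*
 * Emulate guest reads of e500 SPRs, sourcing MAS state from the shared page
 * and the remaining state from the vcpu. Unknown SPRs fall back to the
 * generic BookE mfspr emulation.
 */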
int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int emulated = EMULATE_DONE;

        switch (sprn) {
#ifndef CONFIG_KVM_BOOKE_HV
        case SPRN_PID:
                *spr_val = vcpu_e500->pid[0];
                break;
        case SPRN_PID1:
                *spr_val = vcpu_e500->pid[1];
                break;
        case SPRN_PID2:
                *spr_val = vcpu_e500->pid[2];
                break;
        case SPRN_MAS0:
                *spr_val = vcpu->arch.shared->mas0;
                break;
        case SPRN_MAS1:
                *spr_val = vcpu->arch.shared->mas1;
                break;
        case SPRN_MAS2:
                *spr_val = vcpu->arch.shared->mas2;
                break;
        case SPRN_MAS3:
                *spr_val = (u32)vcpu->arch.shared->mas7_3;
                break;
        case SPRN_MAS4:
                *spr_val = vcpu->arch.shared->mas4;
                break;
        case SPRN_MAS6:
                *spr_val = vcpu->arch.shared->mas6;
                break;
        case SPRN_MAS7:
                *spr_val = vcpu->arch.shared->mas7_3 >> 32;
                break;
#endif
        case SPRN_DECAR:
                *spr_val = vcpu->arch.decar;
                break;
        case SPRN_TLB0CFG:
                *spr_val = vcpu->arch.tlbcfg[0];
                break;
        case SPRN_TLB1CFG:
                *spr_val = vcpu->arch.tlbcfg[1];
                break;
        case SPRN_TLB0PS:
                if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
                        return EMULATE_FAIL;
                *spr_val = vcpu->arch.tlbps[0];
                break;
        case SPRN_TLB1PS:
                if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
                        return EMULATE_FAIL;
                *spr_val = vcpu->arch.tlbps[1];
                break;
        case SPRN_L1CSR0:
                *spr_val = vcpu_e500->l1csr0;
                break;
        case SPRN_L1CSR1:
                *spr_val = vcpu_e500->l1csr1;
                break;
        case SPRN_HID0:
                *spr_val = vcpu_e500->hid0;
                break;
        case SPRN_HID1:
                *spr_val = vcpu_e500->hid1;
                break;
        case SPRN_SVR:
                *spr_val = vcpu_e500->svr;
                break;

        case SPRN_MMUCSR0:
                *spr_val = 0;
                break;

        case SPRN_MMUCFG:
                *spr_val = vcpu->arch.mmucfg;
                break;
        case SPRN_EPTCFG:
                if (!has_feature(vcpu, VCPU_FTR_MMU_V2))
                        return EMULATE_FAIL;
                /*
                 * Legacy Linux guests access EPTCFG register even if the E.PT
                 * category is disabled in the VM. Give them a chance to live.
                 */
                *spr_val = vcpu->arch.eptcfg;
                break;

        case SPRN_PWRMGTCR0:
                *spr_val = vcpu->arch.pwrmgtcr0;
                break;

        /* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
        case SPRN_IVOR32:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
                break;
        case SPRN_IVOR33:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
                break;
        case SPRN_IVOR34:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
                break;
#endif
#ifdef CONFIG_ALTIVEC
        case SPRN_IVOR32:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
                break;
        case SPRN_IVOR33:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
                break;
#endif
        case SPRN_IVOR35:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
                break;
#ifdef CONFIG_KVM_BOOKE_HV
        case SPRN_IVOR36:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
                break;
        case SPRN_IVOR37:
                *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
                break;
#endif
        default:
                emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
        }

        return emulated;
}