linux/arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

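/*
 * Primary opcodes and, for primary opcode 31, the X-form extended opcodes
 * that the generic emulator below knows how to decode.
 */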
#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_LHA  42
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45

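/*
 * On Book3S the decrementer always runs, so a guest mtdec always (re)arms
 * the emulated timer.  On Book E the decrementer interrupt is gated by
 * TCR[DIE].
 */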
#ifdef CONFIG_PPC_BOOK3S
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.tcr & TCR_DIE;
}
#endif

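/*
 * Emulate a guest write to DEC: dequeue or queue the decrementer interrupt
 * as appropriate, convert the new DEC value from timebase ticks to
 * nanoseconds, and (re)arm the hrtimer that models the decrementer.
 */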
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif
        if (kvmppc_dec_enabled(vcpu)) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */

                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                dec_nsec = vcpu->arch.dec;
                dec_nsec *= 1000;
                dec_nsec /= tb_ticks_per_usec;
                hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
                              HRTIMER_MODE_REL);
                vcpu->arch.dec_jiffies = get_tb();
        } else {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
        }
}

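/*
 * Return the current guest DEC value: the value last written by the guest
 * minus the number of timebase ticks that have elapsed since then.
 */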
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;
        return vcpu->arch.dec - jd;
}

/* XXX to do:
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
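/*
 * Emulate a single instruction that trapped to the host.  Returns an
 * emulation_result; unless emulation failed or must be retried, the guest
 * PC is advanced past the emulated instruction before returning.
 */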
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        u32 ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_LWZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

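                /*
                 * The "update" indexed forms below also write the effective
                 * address (rB, plus rA if rA != 0) back into rA after the
                 * access has been emulated.
                 */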
                case OP_31_XOP_LBZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_STWX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

                case OP_31_XOP_LHAX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

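                /*
                 * mfspr: SPRs the generic code knows about are handled here;
                 * anything else goes to the core-specific emulation hook, and
                 * reads of SPRs that nobody recognizes return 0.
                 */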
                case OP_31_XOP_MFSPR:
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
                                break;
                        case SPRN_SRR1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
                                break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
                        case SPRN_MSSSR0:
                                kvmppc_set_gpr(vcpu, rt, 0); break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
                        case SPRN_TBWU:
                                kvmppc_set_gpr(vcpu, rt, get_tb()); break;

                        case SPRN_SPRG0:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
                                break;
                        case SPRN_SPRG1:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
                                break;
                        case SPRN_SPRG2:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
                                break;
                        case SPRN_SPRG3:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
                                break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_DEC:
                        {
                                kvmppc_set_gpr(vcpu, rt,
                                               kvmppc_get_dec(vcpu, get_tb()));
                                break;
                        }
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk("mfspr: unknown spr %x\n", sprn);
                                        kvmppc_set_gpr(vcpu, rt, 0);
                                }
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;

                case OP_31_XOP_STHX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = kvmppc_get_gpr(vcpu, rb);
                        if (ra)
                                ea += kvmppc_get_gpr(vcpu, ra);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, ea);
                        break;

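                /*
                 * mtspr: writes to SRR0/SRR1 and SPRG0-3 go to the shared
                 * page, a write to DEC re-arms the decrementer timer,
                 * TBWL/TBWU/MSSSR0 are ignored, and anything else falls
                 * through to the core-specific hook.
                 */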
                case OP_31_XOP_MTSPR:
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SRR1:
                                vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG1:
                                vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG2:
                                vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
                                break;
                        case SPRN_SPRG3:
                                vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
                                break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk("mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

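                /*
                 * Byte-reversed loads and stores: the final argument to the
                 * load/store handlers (is_bigendian = 0) requests a
                 * byte-swapped access.
                 */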
                case OP_31_XOP_LWBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

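        /*
         * D-form loads and stores.  The "update" variants below write
         * vcpu->arch.paddr_accessed (the address recorded when the access
         * trapped) back into rA.
         */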
        case OP_LWZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case OP_LWZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LBZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STW:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        case OP_STWU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STB:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_LHA:
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        case OP_STH:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

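        /*
         * Anything the generic decoder could not handle is given to the
         * core-specific emulation hook; if that also fails, the guest gets
         * a program interrupt.
         */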
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}