linux/arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long dec_nsec;
        unsigned long long dec_time;

        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
        hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);

        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif

#ifdef CONFIG_BOOKE
        /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
        if (vcpu->arch.dec == 0)
                return;
#endif

        /*
         * The decrementer ticks at the same rate as the timebase, so
         * that's how we convert the guest DEC value to the number of
         * host ticks.
         */

        dec_time = vcpu->arch.dec;
        /*
         * Guest timebase ticks at the same frequency as host decrementer.
         * So use the host decrementer calculations for decrementer emulation.
         */
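        /*
         * decrementer_clockevent.mult/shift scale nanoseconds into host
         * decrementer ticks (ticks = ns * mult >> shift), so applying the
         * inverse here (ticks << shift, divided by mult) converts the
         * guest DEC value into nanoseconds.  The second do_div() then
         * splits that into the seconds/nanoseconds pair ktime_set() takes.
         */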
        dec_time = dec_time << decrementer_clockevent.shift;
        do_div(dec_time, decrementer_clockevent.mult);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
        vcpu->arch.dec_jiffies = get_tb();
}

u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
        u64 jd = tb - vcpu->arch.dec_jiffies;

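        /*
         * jd is the number of timebase ticks that have elapsed since DEC
         * was last written.  On BOOKE the decrementer stops when it hits
         * zero, so clamp the result; Book3S lets it keep counting down
         * into negative values, so the subtraction below may wrap.
         */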
#ifdef CONFIG_BOOKE
        if (vcpu->arch.dec < jd)
                return 0;
#endif

        return vcpu->arch.dec - jd;
}

static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = kvmppc_get_gpr(vcpu, rs);

        switch (sprn) {
        case SPRN_SRR0:
                vcpu->arch.shared->srr0 = spr_val;
                break;
        case SPRN_SRR1:
                vcpu->arch.shared->srr1 = spr_val;
                break;

        /* XXX We need to context-switch the timebase for
         * watchdog and FIT. */
        case SPRN_TBWL: break;
        case SPRN_TBWU: break;

        case SPRN_DEC:
                vcpu->arch.dec = spr_val;
                kvmppc_emulate_dec(vcpu);
                break;

        case SPRN_SPRG0:
                vcpu->arch.shared->sprg0 = spr_val;
                break;
        case SPRN_SPRG1:
                vcpu->arch.shared->sprg1 = spr_val;
                break;
        case SPRN_SPRG2:
                vcpu->arch.shared->sprg2 = spr_val;
                break;
        case SPRN_SPRG3:
                vcpu->arch.shared->sprg3 = spr_val;
                break;

        /* PIR can legally be written, but we ignore it */
        case SPRN_PIR: break;

        default:
                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                                     spr_val);
                if (emulated == EMULATE_FAIL)
                        printk(KERN_INFO "mtspr: unknown spr "
                                "0x%x\n", sprn);
                break;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

        return emulated;
}

static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        enum emulation_result emulated = EMULATE_DONE;
        ulong spr_val = 0;

        switch (sprn) {
        case SPRN_SRR0:
                spr_val = vcpu->arch.shared->srr0;
                break;
        case SPRN_SRR1:
                spr_val = vcpu->arch.shared->srr1;
                break;
        case SPRN_PVR:
                spr_val = vcpu->arch.pvr;
                break;
        case SPRN_PIR:
                spr_val = vcpu->vcpu_id;
                break;

        /* Note: mftb and TBRL/TBWL are user-accessible, so
         * the guest can always access the real TB anyways.
         * In fact, we probably will never see these traps. */
        case SPRN_TBWL:
                spr_val = get_tb() >> 32;
                break;
        case SPRN_TBWU:
                spr_val = get_tb();
                break;

        case SPRN_SPRG0:
                spr_val = vcpu->arch.shared->sprg0;
                break;
        case SPRN_SPRG1:
                spr_val = vcpu->arch.shared->sprg1;
                break;
        case SPRN_SPRG2:
                spr_val = vcpu->arch.shared->sprg2;
                break;
        case SPRN_SPRG3:
                spr_val = vcpu->arch.shared->sprg3;
                break;
        /* Note: SPRG4-7 are user-readable, so we don't get
         * a trap. */

        case SPRN_DEC:
                spr_val = kvmppc_get_dec(vcpu, get_tb());
                break;
        default:
                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
                                                     &spr_val);
                if (unlikely(emulated == EMULATE_FAIL)) {
                        printk(KERN_INFO "mfspr: unknown spr "
                                "0x%x\n", sprn);
                }
                break;
        }

        if (emulated == EMULATE_DONE)
                kvmppc_set_gpr(vcpu, rt, spr_val);
        kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

        return emulated;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = kvmppc_get_last_inst(vcpu);
        int ra = get_ra(inst);
        int rs = get_rs(inst);
        int rt = get_rt(inst);
        int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
                kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                kvmppc_core_queue_program(vcpu,
                                          vcpu->arch.shared->esr | ESR_PTR);
#endif
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
                case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
                        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
                        kvmppc_core_queue_program(vcpu,
                                        vcpu->arch.shared->esr | ESR_PTR);
#endif
                        advance = 0;
                        break;
                case OP_31_XOP_LWZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

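                /*
                 * The "update" forms (lbzux, lhzux, stbux, sthux) also
                 * write the effective address of the access back into rA.
                 * The MMIO handlers record that address in
                 * vcpu->arch.vaddr_accessed, so it only needs to be copied
                 * into the register here.
                 */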
                case OP_31_XOP_LBZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_STWX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

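                /*
                 * lhax is an algebraic (sign-extending) halfword load, so
                 * it goes through kvmppc_handle_loads(), the variant of the
                 * load handler that sign-extends the MMIO result.
                 */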
                case OP_31_XOP_LHAX:
                        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MFSPR:
                        emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
                        break;

                case OP_31_XOP_STHX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;

                case OP_31_XOP_MTSPR:
                        emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
                        break;

                case OP_31_XOP_DCBST:
                case OP_31_XOP_DCBF:
                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

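                /*
                 * The byte-reversed forms (lwbrx, lhbrx, stwbrx, sthbrx)
                 * pass 0 for the final is_bigendian argument of the
                 * handlers, so the data is byte-swapped relative to the
                 * normal forms above.
                 */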
                case OP_31_XOP_LWBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
                        break;

                case OP_31_XOP_STWBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
        case OP_LD:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
                break;

        case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LBZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STW:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;

        /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
        case OP_STD:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               8, 1);
                break;

        case OP_STWU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STB:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;

        case OP_STBU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHZ:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_LHA:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                break;

        case OP_LHAU:
                emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        case OP_STH:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;

        case OP_STHU:
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;

        default:
                emulated = EMULATE_FAIL;
        }

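        /*
         * Anything not recognised above gets one more chance via the
         * core-specific hook.  EMULATE_AGAIN leaves the PC untouched so
         * the instruction can be retried; a final EMULATE_FAIL logs the
         * opcode and raises a program interrupt in the guest.
         */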
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_AGAIN) {
                        advance = 0;
                } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                        kvmppc_core_queue_program(vcpu, 0);
                }
        }

        trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

        /* Advance past emulated instruction. */
        if (advance)
                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

        return emulated;
}