linux/arch/mips/kvm/emulate.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

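/*
 * r4kcache.h provides MT-safe variants of the cache ops when CONFIG_MIPS_MT
 * is defined, so it is masked out around the include below, presumably to
 * get the plain uniprocessor implementations.
 */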
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return EPC, emulating the branch as required.
 * This function should only be called for an instruction executing in a
 * branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
        unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /* Read the instruction */
        insn.word = kvm_get_inst((u32 *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
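                /* Fall through */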
        case j_op:
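                /*
                 * J/JAL are PC-region branches: the target replaces the low
                 * 28 bits of the delay slot PC, so the jump stays within the
                 * current 256MB-aligned region.
                 */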
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
        case blezl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:   /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
        case bgtzl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                break;

#ifdef CONFIG_CPU_MIPSR6
        /* R6 added the following compact branches with forbidden slots */
        case blezl_op:  /* POP26 */
        case bgtzl_op:  /* POP27 */
                /* only rt == 0 isn't compact branch */
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop10_op:
        case pop30_op:
                /* only rs == rt == 0 is reserved, rest are compact branches */
                if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
                        goto compact_branch;
                break;
        case pop66_op:
        case pop76_op:
                /* only rs == 0 isn't compact branch */
                if (insn.i_format.rs != 0)
                        goto compact_branch;
                break;
compact_branch:
                /*
                 * If we've hit an exception on the forbidden slot, then
                 * the branch must not have been taken.
                 */
                epc += 8;
                nextpc = epc;
                break;
#else
compact_branch:
                /* Compact branches not supported before R6 */
                break;
#endif
        }

        return nextpc;

unaligned:
        kvm_err("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else
                vcpu->arch.pc += 4;

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     1 if the CP0_Count timer is disabled by either the guest
 *              CP0_Cause.DC bit or the count_ctl.DC bit.
 *              0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta*count_hz will never overflow since
         * at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
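        /*
         * Worked example, assuming the default 100 MHz count_hz set up in
         * kvm_mips_init_count(): count_period = 2^32 * 10ns ~= 42.9s, and at
         * the boundary delta * count_hz = 10^9 * 2^32 ~= 4.3e18, comfortably
         * below 2^64 ~= 1.8e19.
         */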
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:       Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:     Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles the
 * timer interrupt if it is pending and hasn't been handled yet.
 *
 * Returns:     The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t expires, threshold;
        u32 count, compare;
        int running;

        /* Calculate the biased and scaled guest CP0_Count */
        count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
        compare = kvm_read_c0_guest_compare(cop0);

        /*
         * Find whether CP0_Count has reached the closest timer interrupt. If
         * not, we shouldn't inject it.
         */
        if ((s32)(count - compare) < 0)
                return count;

        /*
         * The CP0_Count we're going to return has already reached the closest
         * timer interrupt. Quickly check if it really is a new interrupt by
         * looking at whether the interval until the hrtimer expiry time is
         * less than 1/4 of the timer period.
         */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
        if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:       Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:     The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:       Virtual CPU.
 * @count:      Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:     The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:       Virtual CPU.
 * @now:        ktime at point of resume.
 * @count:      CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
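        /*
         * The (compare - count - 1) + 1 form makes a zero difference wrap to
         * a full 2^32 ticks, so compare == count means "one whole period",
         * not "immediately".
         */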
        compare = kvm_read_c0_guest_compare(cop0);
        delta = (u64)(u32)(compare - count - 1) + 1;
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:       Virtual CPU.
 * @count:      Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:       Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
        /* 100 MHz */
        vcpu->arch.count_hz = 100*1000*1000;
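        /* count_period = time for CP0_Count to wrap: 2^32 ticks at count_hz */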
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                          vcpu->arch.count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at 0 */
        kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:     -EINVAL if @count_hz is out of range.
 *              0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:       Virtual CPU.
 * @compare:    New CP0_Compare value.
 * @ack:        Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        u32 old_compare = kvm_read_c0_guest_compare(cop0);
        ktime_t now;
        u32 count;

        /* if unchanged, must just be an ack */
        if (old_compare == compare) {
                if (!ack)
                        return;
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0, compare);
                return;
        }

        /* freeze_hrtimer() takes care of timer interrupts <= count */
        dc = kvm_mips_count_disabled(vcpu);
        if (!dc)
                now = kvm_mips_freeze_hrtimer(vcpu, &count);

        if (ack)
                kvm_mips_callbacks->dequeue_timer_int(vcpu);

        kvm_write_c0_guest_compare(cop0, compare);

        /* resume_hrtimer() takes care of timer interrupts > count */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:     The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:       Virtual CPU.
 * @count_ctl:  Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:     -EINVAL if reserved bits are set.
 *              0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        u32 count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
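                        /* Same 0 -> 2^32 wrap as in kvm_mips_resume_hrtimer() */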
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
                        delta = (u64)(u32)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:               Virtual CPU.
 * @count_resume:       Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:     -EINVAL if out of valid range (0..now).
 *              0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:       Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:     The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so
 * that we can catch this if things ever change.
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        unsigned long pc = vcpu->arch.pc;

        kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return EMULATE_FAIL;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
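                /* Clear the probe failure bit (Index.P, bit 31) and wrap into range */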
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];
        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;
        int index;

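        /*
         * Pick a random victim entry; KVM_MIPS_GUEST_TLB_SIZE is a power of
         * two, so the mask below yields a uniformly distributed index.
         */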
        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        tlb = &vcpu->arch.guest_tlb[index];

        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        unsigned long pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config4 and ULRI are optional */
        unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config5 is optional */
        unsigned int mask = MIPS_CONF_M;

        /* KScrExist */
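        /* Config4.KScrExist occupies bits 23..16, one bit per KScratch register */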
        mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;

        return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE;
                /* We don't support UFR or UFE */
        }

        return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           u32 *opc, u32 cause,
                                           struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to roll back the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case tlbr_op:   /*  Read indexed TLB entry  */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /*  Write indexed  */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /*  Write random  */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        kvm_err("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                vcpu->arch.gprs[rt] =
                                    (s32)kvm_mips_read_count(vcpu);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

                        trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);

                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                kvm_err("Invalid TLB Index: %ld",
                                        vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
                                        kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                u32 nasid =
                                        vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
                                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
                                    ((kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID) != nasid)) {
                                        trace_kvm_asid_change(vcpu,
                                                kvm_read_c0_guest_entryhi(cop0)
                                                        & KVM_ENTRYHI_ASID,
                                                nasid);

                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                /* If we are writing to COMPARE */
                                /* Clear pending timer interrupt, if any */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                unsigned int old_val, val, change;

                                old_val = kvm_read_c0_guest_status(cop0);
                                val = vcpu->arch.gprs[rt];
                                change = val ^ old_val;

                                /* Make sure that the NMI bit is never set */
                                val &= ~ST0_NMI;

                                /*
                                 * Don't allow CU1 or FR to be set unless FPU
                                 * capability enabled and exists in guest
                                 * configuration.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        val &= ~(ST0_CU1 | ST0_FR);

                                /*
                                 * Also don't allow FR to be set if host doesn't
                                 * support it.
                                 */
                                if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
                                        val &= ~ST0_FR;


                                /* Handle changes in FPU mode */
                                preempt_disable();

                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                if (change & ST0_FR)
                                        kvm_drop_fpu(vcpu);

                                /*
                                 * If MSA state is already live, it is undefined
                                 * how it interacts with FR=0 FPU state, and we
                                 * don't want to hit reserved instruction
                                 * exceptions trying to save the MSA state later
                                 * when CU=1 && FR=1, so play it safe and save
                                 * it first.
                                 */
                                if (change & ST0_CU1 && !(val & ST0_FR) &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        kvm_lose_fpu(vcpu);

                                /*
                                 * Propagate CU1 (FPU enable) changes
                                 * immediately if the FPU context is already
                                 * loaded. When disabling we leave the context
                                 * loaded so it can be quickly enabled again in
                                 * the near future.
                                 */
                                if (change & ST0_CU1 &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_status(ST0_CU1, val);

                                preempt_enable();

                                kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                /*
                                 * If FPU present, we need CU1/FR bits to take
                                 * effect fairly soon.
                                 */
                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                        kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
                                unsigned int old_val, val, change, wrmask;

                                old_val = kvm_read_c0_guest_config5(cop0);
                                val = vcpu->arch.gprs[rt];

                                /* Only a few bits are writable in Config5 */
                                wrmask = kvm_mips_config5_wrmask(vcpu);
                                change = (val ^ old_val) & wrmask;
                                val = old_val ^ change;


                                /* Handle changes in FPU/MSA modes */
                                preempt_disable();

                                /*
                                 * Propagate FRE changes immediately if the FPU
                                 * context is already loaded.
                                 */
                                if (change & MIPS_CONF5_FRE &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
                                        change_c0_config5(MIPS_CONF5_FRE, val);

                                /*
                                 * Propagate MSAEn changes immediately if the
                                 * MSA context is already loaded. When disabling
                                 * we leave the context loaded so it can be
                                 * quickly enabled again in the near future.
                                 */
                                if (change & MIPS_CONF5_MSAEN &&
                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                        change_c0_config5(MIPS_CONF5_MSAEN,
                                                          val);

                                preempt_enable();

                                kvm_write_c0_guest_config5(cop0, val);
                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                                u32 old_cause, new_cause;

                                old_cause = kvm_read_c0_guest_cause(cop0);
                                new_cause = vcpu->arch.gprs[rt];
                                /* Update R/W bits */
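                                /* Writable: DC (bit 27), IV (bit 23), IP1..IP0 (bits 9..8) */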
                                kvm_change_c0_guest_cause(cop0, 0x08800300,
                                                          new_cause);
                                /* DC bit enabling/disabling timer? */
                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
                                        if (new_cause & CAUSEF_DC)
                                                kvm_mips_count_disable_cause(vcpu);
                                        else
                                                kvm_mips_count_enable_cause(vcpu);
                                }
                        } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
                                u32 mask = MIPS_HWRENA_CPUNUM |
                                           MIPS_HWRENA_SYNCISTEP |
                                           MIPS_HWRENA_CC |
                                           MIPS_HWRENA_CCRES;

                                if (kvm_read_c0_guest_config3(cop0) &
                                    MIPS_CONF3_ULRI)
                                        mask |= MIPS_HWRENA_ULR;
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }
                        break;

                case dmtc_op:
                        kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                                vcpu->arch.pc, rt, rd, sel);
                        trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel),
                                      vcpu->arch.gprs[rt]);
                        er = EMULATE_FAIL;
                        break;

                case mfmc0_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
                        if (rt != 0)
                                vcpu->arch.gprs[rt] =
                                    kvm_read_c0_guest_status(cop0);
                        /* sc bit set: EI, clear: DI */
                        if (inst.mfmc0_format.sc) {
                                kvm_debug("[%#lx] mfmc0_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
                                kvm_debug("[%#lx] mfmc0_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }

                        break;

                case wrpgpr_op:
                        {
                                u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
                                u32 pss =
                                    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
                                /*
                                 * We don't support any shadow register sets, so
                                 * SRSCtl[PSS] == SRSCtl[CSS] = 0
                                 */
                                if (css || pss) {
                                        er = EMULATE_FAIL;
                                        break;
                                }
                                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
                                          vcpu->arch.gprs[rt]);
                                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
                        }
                        break;
                default:
                        kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
                                vcpu->arch.pc, inst.c0r_format.rs);
                        er = EMULATE_FAIL;
                        break;
                }
        }

done:
        /* Roll back the PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

dont_update_pc:
        /*
         * This is for special instructions whose emulation
         * updates the PC, so do not overwrite the PC under
         * any circumstances
         */

        return er;
}

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                                             u32 cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
1371        enum emulation_result er = EMULATE_DO_MMIO;
1372        u32 rt;
1373        u32 bytes;
1374        void *data = run->mmio.data;
1375        unsigned long curr_pc;
1376
1377        /*
1378         * Update PC and hold onto current PC in case there is
1379         * an error and we want to roll back the PC
1380         */
1381        curr_pc = vcpu->arch.pc;
1382        er = update_pc(vcpu, cause);
1383        if (er == EMULATE_FAIL)
1384                return er;
1385
1386        rt = inst.i_format.rt;
1387
1388        switch (inst.i_format.opcode) {
1389        case sb_op:
1390                bytes = 1;
1391                if (bytes > sizeof(run->mmio.data)) {
1392                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1393                               bytes);
1394                }
1395                run->mmio.phys_addr =
1396                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1397                                                   host_cp0_badvaddr);
1398                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1399                        er = EMULATE_FAIL;
1400                        break;
1401                }
1402                run->mmio.len = bytes;
1403                run->mmio.is_write = 1;
1404                vcpu->mmio_needed = 1;
1405                vcpu->mmio_is_write = 1;
1406                *(u8 *) data = vcpu->arch.gprs[rt];
1407                kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1408                          vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1409                          *(u8 *) data);
1410
1411                break;
1412
1413        case sw_op:
1414                bytes = 4;
1415                if (bytes > sizeof(run->mmio.data)) {
1416                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1417                               bytes);
1418                }
1419                run->mmio.phys_addr =
1420                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1421                                                   host_cp0_badvaddr);
1422                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1423                        er = EMULATE_FAIL;
1424                        break;
1425                }
1426
1427                run->mmio.len = bytes;
1428                run->mmio.is_write = 1;
1429                vcpu->mmio_needed = 1;
1430                vcpu->mmio_is_write = 1;
1431                *(u32 *) data = vcpu->arch.gprs[rt];
1432
1433                kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1434                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1435                          vcpu->arch.gprs[rt], *(u32 *) data);
1436                break;
1437
1438        case sh_op:
1439                bytes = 2;
1440                if (bytes > sizeof(run->mmio.data)) {
1441                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1442                               bytes);
1443                }
1444                run->mmio.phys_addr =
1445                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1446                                                   host_cp0_badvaddr);
1447                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1448                        er = EMULATE_FAIL;
1449                        break;
1450                }
1451
1452                run->mmio.len = bytes;
1453                run->mmio.is_write = 1;
1454                vcpu->mmio_needed = 1;
1455                vcpu->mmio_is_write = 1;
1456                *(u16 *) data = vcpu->arch.gprs[rt];
1457
1458                kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1459                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1460                          vcpu->arch.gprs[rt], *(u16 *) data);
1461                break;
1462
1463        default:
1464                kvm_err("Store not yet supported (inst=0x%08x)\n",
1465                        inst.word);
1466                er = EMULATE_FAIL;
1467                break;
1468        }
1469
1470        /* Roll back the PC if emulation was unsuccessful */
1471        if (er == EMULATE_FAIL)
1472                vcpu->arch.pc = curr_pc;
1473
1474        return er;
1475}
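
/*
 * On EMULATE_DO_MMIO the run->mmio fields filled in above describe the
 * access to userspace via a KVM_EXIT_MMIO exit. A VMM would typically
 * service a store along these lines (a sketch under an assumed helper
 * name, device_mmio_write(); not actual QEMU code):
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO && run->mmio.is_write)
 *		device_mmio_write(run->mmio.phys_addr, run->mmio.data,
 *				  run->mmio.len);
 *
 * Stores need no completion step on re-entry; loads do (see
 * kvm_mips_complete_mmio_load() below).
 */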
1476
1477enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1478                                            u32 cause, struct kvm_run *run,
1479                                            struct kvm_vcpu *vcpu)
1480{
1481        enum emulation_result er = EMULATE_DO_MMIO;
1482        u32 op, rt;
1483        u32 bytes;
1484
1485        rt = inst.i_format.rt;
1486        op = inst.i_format.opcode;
1487
1488        vcpu->arch.pending_load_cause = cause;
1489        vcpu->arch.io_gpr = rt;
1490
1491        switch (op) {
1492        case lw_op:
1493                bytes = 4;
1494                if (bytes > sizeof(run->mmio.data)) {
1495                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1496                               bytes);
1497                        er = EMULATE_FAIL;
1498                        break;
1499                }
1500                run->mmio.phys_addr =
1501                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1502                                                   host_cp0_badvaddr);
1503                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1504                        er = EMULATE_FAIL;
1505                        break;
1506                }
1507
1508                run->mmio.len = bytes;
1509                run->mmio.is_write = 0;
1510                vcpu->mmio_needed = 1;
1511                vcpu->mmio_is_write = 0;
1512                break;
1513
1514        case lh_op:
1515        case lhu_op:
1516                bytes = 2;
1517                if (bytes > sizeof(run->mmio.data)) {
1518                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1519                               bytes);
1520                        er = EMULATE_FAIL;
1521                        break;
1522                }
1523                run->mmio.phys_addr =
1524                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1525                                                   host_cp0_badvaddr);
1526                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1527                        er = EMULATE_FAIL;
1528                        break;
1529                }
1530
1531                run->mmio.len = bytes;
1532                run->mmio.is_write = 0;
1533                vcpu->mmio_needed = 1;
1534                vcpu->mmio_is_write = 0;
1535
1536                if (op == lh_op)
1537                        vcpu->mmio_needed = 2;
1538                else
1539                        vcpu->mmio_needed = 1;
1540
1541                break;
1542
1543        case lbu_op:
1544        case lb_op:
1545                bytes = 1;
1546                if (bytes > sizeof(run->mmio.data)) {
1547                        kvm_err("%s: bad MMIO length: %d\n", __func__,
1548                               bytes);
1549                        er = EMULATE_FAIL;
1550                        break;
1551                }
1552                run->mmio.phys_addr =
1553                    kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1554                                                   host_cp0_badvaddr);
1555                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1556                        er = EMULATE_FAIL;
1557                        break;
1558                }
1559
1560                run->mmio.len = bytes;
1561                run->mmio.is_write = 0;
1562                vcpu->mmio_is_write = 0;
1563
1564                if (op == lb_op)
1565                        vcpu->mmio_needed = 2;
1566                else
1567                        vcpu->mmio_needed = 1;
1568
1569                break;
1570
1571        default:
1572                kvm_err("Load not yet supported (inst=0x%08x)\n",
1573                        inst.word);
1574                er = EMULATE_FAIL;
1575                break;
1576        }
1577
1578        return er;
1579}
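
/*
 * For loads, vcpu->mmio_needed doubles as an extension flag consumed by
 * kvm_mips_complete_mmio_load() once userspace has filled run->mmio.data:
 * 2 requests sign extension (lb/lh) and 1 zero extension (lbu/lhu/lw).
 * For example, an lb that reads the byte 0x80 must complete with the
 * destination GPR sign extended (0xffffff80 in a 32-bit GPR), whereas
 * lbu completes with 0x00000080.
 */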
1580
1581enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1582                                             u32 *opc, u32 cause,
1583                                             struct kvm_run *run,
1584                                             struct kvm_vcpu *vcpu)
1585{
1586        struct mips_coproc *cop0 = vcpu->arch.cop0;
1587        enum emulation_result er = EMULATE_DONE;
1588        u32 cache, op_inst, op, base;
1589        s16 offset;
1590        struct kvm_vcpu_arch *arch = &vcpu->arch;
1591        unsigned long va;
1592        unsigned long curr_pc;
1593
1594        /*
1595         * Update PC and hold onto current PC in case there is
1596         * an error and we want to roll back the PC
1597         */
1598        curr_pc = vcpu->arch.pc;
1599        er = update_pc(vcpu, cause);
1600        if (er == EMULATE_FAIL)
1601                return er;
1602
1603        base = inst.i_format.rs;
1604        op_inst = inst.i_format.rt;
1605        if (cpu_has_mips_r6)
1606                offset = inst.spec3_format.simmediate;
1607        else
1608                offset = inst.i_format.simmediate;
1609        cache = op_inst & CacheOp_Cache;
1610        op = op_inst & CacheOp_Op;
1611
1612        va = arch->gprs[base] + offset;
1613
1614        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1615                  cache, op, base, arch->gprs[base], offset);
1616
1617        /*
1618         * Index Writeback Invalidate is issued by Linux on startup to
1619         * invalidate the caches entirely by stepping through all the
1620         * ways/indexes, so emulate it here with a single whole-cache blast
1621         */
1622        if (op == Index_Writeback_Inv) {
1623                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1624                          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1625                          arch->gprs[base], offset);
1626
1627                if (cache == Cache_D)
1628                        r4k_blast_dcache();
1629                else if (cache == Cache_I)
1630                        r4k_blast_icache();
1631                else {
1632                        kvm_err("%s: unsupported CACHE INDEX operation\n",
1633                                __func__);
1634                        return EMULATE_FAIL;
1635                }
1636
1637#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1638                kvm_mips_trans_cache_index(inst, opc, vcpu);
1639#endif
1640                goto done;
1641        }
1642
1643        preempt_disable();
1644        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1645                if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
1646                    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
1647                        kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
1648                                __func__, va, vcpu, read_c0_entryhi());
1649                        er = EMULATE_FAIL;
1650                        preempt_enable();
1651                        goto done;
1652                }
1653        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1654                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1655                int index;
1656
1657                /* If an entry already exists then skip */
1658                if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1659                        goto skip_fault;
1660
1661                /*
1662                 * If the address is not in the guest TLB, give the guest a
1663                 * fault; the resulting guest handler will do the right thing
1664                 */
1665                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1666                                                  (kvm_read_c0_guest_entryhi
1667                                                   (cop0) & KVM_ENTRYHI_ASID));
1668
1669                if (index < 0) {
1670                        vcpu->arch.host_cp0_badvaddr = va;
1671                        vcpu->arch.pc = curr_pc;
1672                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1673                                                         vcpu);
1674                        preempt_enable();
1675                        goto dont_update_pc;
1676                } else {
1677                        struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1678                        /*
1679                         * Check if the entry is valid; if not, set up a TLB
1680                         * invalid exception for the guest
1681                         */
1682                        if (!TLB_IS_VALID(*tlb, va)) {
1683                                vcpu->arch.host_cp0_badvaddr = va;
1684                                vcpu->arch.pc = curr_pc;
1685                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1686                                                                run, vcpu);
1687                                preempt_enable();
1688                                goto dont_update_pc;
1689                        }
1690                        /*
1691                         * Propagate the entry from the guest TLB into the
1692                         * shadow host TLB
1693                         */
1694                        if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
1695                                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
1696                                        __func__, va, index, vcpu,
1697                                        read_c0_entryhi());
1698                                er = EMULATE_FAIL;
1699                                preempt_enable();
1700                                goto done;
1701                        }
1702                }
1703        } else {
1704                kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1705                        cache, op, base, arch->gprs[base], offset);
1706                er = EMULATE_FAIL;
1707                preempt_enable();
1708                goto done;
1709
1710        }
1711
1712skip_fault:
1713        /* XXXKYMA: Only the subset of cache ops used by Linux is supported */
1714        if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1715                flush_dcache_line(va);
1716
1717#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1718                /*
1719                 * Replace the CACHE instruction with a SYNCI; not equivalent,
1720                 * but it avoids a trap
1721                 */
1722                kvm_mips_trans_cache_va(inst, opc, vcpu);
1723#endif
1724        } else if (op_inst == Hit_Invalidate_I) {
1725                flush_dcache_line(va);
1726                flush_icache_line(va);
1727
1728#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1729                /* Replace the CACHE instruction with a SYNCI */
1730                kvm_mips_trans_cache_va(inst, opc, vcpu);
1731#endif
1732        } else {
1733                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1734                        cache, op, base, arch->gprs[base], offset);
1735                er = EMULATE_FAIL;
1736        }
1737
1738        preempt_enable();
1739done:
1740        /* Roll back the PC only if emulation was unsuccessful */
1741        if (er == EMULATE_FAIL)
1742                vcpu->arch.pc = curr_pc;
1743
1744dont_update_pc:
1745        /*
1746         * This is for exceptions whose emulation updates the PC, so do not
1747         * overwrite the PC under any circumstances
1748         */
1749
1750        return er;
1751}
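
/*
 * The 5-bit CACHE op field decodes as two subfields: CacheOp_Cache
 * (bits 1:0) selects the cache and CacheOp_Op (bits 4:2) the operation.
 * For example, op_inst 0x15 splits into cache 0x1 (Cache_D) and op 0x14
 * (Hit_Writeback_Inv), i.e. Hit_Writeback_Inv_D, the D-cache line
 * writeback+invalidate handled in the skip_fault path above.
 */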
1752
1753enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1754                                            struct kvm_run *run,
1755                                            struct kvm_vcpu *vcpu)
1756{
1757        union mips_instruction inst;
1758        enum emulation_result er = EMULATE_DONE;
1759
1760        /* Fetch the instruction. */
1761        if (cause & CAUSEF_BD)
1762                opc += 1;
1763
1764        inst.word = kvm_get_inst(opc, vcpu);
1765
1766        switch (inst.r_format.opcode) {
1767        case cop0_op:
1768                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1769                break;
1770        case sb_op:
1771        case sh_op:
1772        case sw_op:
1773                er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1774                break;
1775        case lb_op:
1776        case lbu_op:
1777        case lhu_op:
1778        case lh_op:
1779        case lw_op:
1780                er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1781                break;
1782
1783#ifndef CONFIG_CPU_MIPSR6
1784        case cache_op:
1785                ++vcpu->stat.cache_exits;
1786                trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1787                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1788                break;
1789#else
1790        case spec3_op:
1791                switch (inst.spec3_format.func) {
1792                case cache6_op:
1793                        ++vcpu->stat.cache_exits;
1794                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1795                        er = kvm_mips_emulate_cache(inst, opc, cause, run,
1796                                                    vcpu);
1797                        break;
1798                default:
1799                        goto unknown;
1800                }
1801                break;
1802unknown:
1803#endif
1804
1805        default:
1806                kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1807                        inst.word);
1808                kvm_arch_vcpu_dump_regs(vcpu);
1809                er = EMULATE_FAIL;
1810                break;
1811        }
1812
1813        return er;
1814}
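
/*
 * Note the fetch adjustment at the top of kvm_mips_emulate_inst(): when
 * the exception was taken in a branch delay slot (CAUSEF_BD set),
 * CP0_EPC points at the branch, so the faulting instruction sits one
 * word later, hence opc += 1. kvm_mips_handle_ri() below applies the
 * same correction.
 */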
1815
1816enum emulation_result kvm_mips_emulate_syscall(u32 cause,
1817                                               u32 *opc,
1818                                               struct kvm_run *run,
1819                                               struct kvm_vcpu *vcpu)
1820{
1821        struct mips_coproc *cop0 = vcpu->arch.cop0;
1822        struct kvm_vcpu_arch *arch = &vcpu->arch;
1823        enum emulation_result er = EMULATE_DONE;
1824
1825        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1826                /* save old pc */
1827                kvm_write_c0_guest_epc(cop0, arch->pc);
1828                kvm_set_c0_guest_status(cop0, ST0_EXL);
1829
1830                if (cause & CAUSEF_BD)
1831                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1832                else
1833                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1834
1835                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1836
1837                kvm_change_c0_guest_cause(cop0, (0xff),
1838                                          (EXCCODE_SYS << CAUSEB_EXCCODE));
1839
1840                /* Set PC to the exception entry point */
1841                arch->pc = KVM_GUEST_KSEG0 + 0x180;
1842
1843        } else {
1844                kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
1845                er = EMULATE_FAIL;
1846        }
1847
1848        return er;
1849}
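
/*
 * kvm_mips_emulate_syscall() and the TLB/RI/BP/TRAP/FPE helpers below
 * all follow the architected general-exception entry sequence: latch
 * the faulting PC in guest EPC, set Status.EXL, mirror the branch-delay
 * state into Cause.BD, write the ExcCode, and redirect to a guest
 * vector. Only a TLB refill taken with EXL clear uses the special
 * vector at guest offset 0x0; every other case, including faults taken
 * with EXL already set, uses the general vector at offset 0x180.
 */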
1850
1851enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
1852                                                  u32 *opc,
1853                                                  struct kvm_run *run,
1854                                                  struct kvm_vcpu *vcpu)
1855{
1856        struct mips_coproc *cop0 = vcpu->arch.cop0;
1857        struct kvm_vcpu_arch *arch = &vcpu->arch;
1858        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1859                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1860
1861        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1862                /* save old pc */
1863                kvm_write_c0_guest_epc(cop0, arch->pc);
1864                kvm_set_c0_guest_status(cop0, ST0_EXL);
1865
1866                if (cause & CAUSEF_BD)
1867                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1868                else
1869                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1870
1871                kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1872                          arch->pc);
1873
1874                /* set pc to the exception entry point */
1875                arch->pc = KVM_GUEST_KSEG0 + 0x0;
1876
1877        } else {
1878                kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1879                          arch->pc);
1880
1881                arch->pc = KVM_GUEST_KSEG0 + 0x180;
1882        }
1883
1884        kvm_change_c0_guest_cause(cop0, (0xff),
1885                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));
1886
1887        /* set up the badvaddr, context and entryhi registers for the guest */
1888        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1889        /* XXXKYMA: is the context register used by linux??? */
1890        kvm_write_c0_guest_entryhi(cop0, entryhi);
1891        /* Blow away the shadow host TLBs */
1892        kvm_mips_flush_host_tlb(1);
1893
1894        return EMULATE_DONE;
1895}
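
/*
 * The EntryHi written above pairs the faulting VPN2 with the guest's
 * current ASID so the guest refill handler probes the right address
 * space. As a worked example (assuming 4 KiB pages, so VPN2_MASK strips
 * the low 13 bits): badvaddr 0x0040abcd with guest ASID 0x5 yields
 * entryhi (0x0040abcd & ~0x1fff) | 0x5 == 0x0040a005.
 */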
1896
1897enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
1898                                                 u32 *opc,
1899                                                 struct kvm_run *run,
1900                                                 struct kvm_vcpu *vcpu)
1901{
1902        struct mips_coproc *cop0 = vcpu->arch.cop0;
1903        struct kvm_vcpu_arch *arch = &vcpu->arch;
1904        unsigned long entryhi =
1905                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1906                (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1907
1908        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1909                /* save old pc */
1910                kvm_write_c0_guest_epc(cop0, arch->pc);
1911                kvm_set_c0_guest_status(cop0, ST0_EXL);
1912
1913                if (cause & CAUSEF_BD)
1914                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1915                else
1916                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1917
1918                kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1919                          arch->pc);
1920
1921                /* set pc to the exception entry point */
1922                arch->pc = KVM_GUEST_KSEG0 + 0x180;
1923
1924        } else {
1925                kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1926                          arch->pc);
1927                arch->pc = KVM_GUEST_KSEG0 + 0x180;
1928        }
1929
1930        kvm_change_c0_guest_cause(cop0, (0xff),
1931                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));
1932
1933        /* set up the badvaddr, context and entryhi registers for the guest */
1934        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1935        /* XXXKYMA: is the context register used by linux??? */
1936        kvm_write_c0_guest_entryhi(cop0, entryhi);
1937        /* Blow away the shadow host TLBs */
1938        kvm_mips_flush_host_tlb(1);
1939
1940        return EMULATE_DONE;
1941}
1942
1943enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
1944                                                  u32 *opc,
1945                                                  struct kvm_run *run,
1946                                                  struct kvm_vcpu *vcpu)
1947{
1948        struct mips_coproc *cop0 = vcpu->arch.cop0;
1949        struct kvm_vcpu_arch *arch = &vcpu->arch;
1950        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1951                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1952
1953        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1954                /* save old pc */
1955                kvm_write_c0_guest_epc(cop0, arch->pc);
1956                kvm_set_c0_guest_status(cop0, ST0_EXL);
1957
1958                if (cause & CAUSEF_BD)
1959                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1960                else
1961                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1962
1963                kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1964                          arch->pc);
1965
1966                /* Set PC to the exception entry point */
1967                arch->pc = KVM_GUEST_KSEG0 + 0x0;
1968        } else {
1969                kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1970                          arch->pc);
1971                arch->pc = KVM_GUEST_KSEG0 + 0x180;
1972        }
1973
1974        kvm_change_c0_guest_cause(cop0, (0xff),
1975                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));
1976
1977        /* set up the badvaddr, context and entryhi registers for the guest */
1978        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1979        /* XXXKYMA: is the context register used by linux??? */
1980        kvm_write_c0_guest_entryhi(cop0, entryhi);
1981        /* Blow away the shadow host TLBs */
1982        kvm_mips_flush_host_tlb(1);
1983
1984        return EMULATE_DONE;
1985}
1986
1987enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
1988                                                 u32 *opc,
1989                                                 struct kvm_run *run,
1990                                                 struct kvm_vcpu *vcpu)
1991{
1992        struct mips_coproc *cop0 = vcpu->arch.cop0;
1993        struct kvm_vcpu_arch *arch = &vcpu->arch;
1994        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1995                (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
1996
1997        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1998                /* save old pc */
1999                kvm_write_c0_guest_epc(cop0, arch->pc);
2000                kvm_set_c0_guest_status(cop0, ST0_EXL);
2001
2002                if (cause & CAUSEF_BD)
2003                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2004                else
2005                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2006
2007                kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
2008                          arch->pc);
2009
2010                /* Set PC to the exception entry point */
2011                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2012        } else {
2013                kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
2014                          arch->pc);
2015                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2016        }
2017
2018        kvm_change_c0_guest_cause(cop0, (0xff),
2019                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));
2020
2021        /* set up the badvaddr, context and entryhi registers for the guest */
2022        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2023        /* XXXKYMA: is the context register used by linux??? */
2024        kvm_write_c0_guest_entryhi(cop0, entryhi);
2025        /* Blow away the shadow host TLBs */
2026        kvm_mips_flush_host_tlb(1);
2027
2028        return EMULATE_DONE;
2029}
2030
2031/* TLBMOD: store into address matching TLB with Dirty bit off */
2032enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
2033                                             struct kvm_run *run,
2034                                             struct kvm_vcpu *vcpu)
2035{
2036        enum emulation_result er = EMULATE_DONE;
2037#ifdef DEBUG
2038        struct mips_coproc *cop0 = vcpu->arch.cop0;
2039        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2040                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2041        int index;
2042
2043        /* If the address is not in the guest TLB, then we are in trouble */
2044        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
2045        if (index < 0) {
2046                /* XXXKYMA Invalidate and retry */
2047                kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
2048                kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
2049                     __func__, entryhi);
2050                kvm_mips_dump_guest_tlbs(vcpu);
2051                kvm_mips_dump_host_tlbs();
2052                return EMULATE_FAIL;
2053        }
2054#endif
2055
2056        er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
2057        return er;
2058}
2059
2060enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2061                                              u32 *opc,
2062                                              struct kvm_run *run,
2063                                              struct kvm_vcpu *vcpu)
2064{
2065        struct mips_coproc *cop0 = vcpu->arch.cop0;
2066        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2067                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2068        struct kvm_vcpu_arch *arch = &vcpu->arch;
2069
2070        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2071                /* save old pc */
2072                kvm_write_c0_guest_epc(cop0, arch->pc);
2073                kvm_set_c0_guest_status(cop0, ST0_EXL);
2074
2075                if (cause & CAUSEF_BD)
2076                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2077                else
2078                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2079
2080                kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2081                          arch->pc);
2082
2083                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2084        } else {
2085                kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2086                          arch->pc);
2087                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2088        }
2089
2090        kvm_change_c0_guest_cause(cop0, (0xff),
2091                                  (EXCCODE_MOD << CAUSEB_EXCCODE));
2092
2093        /* set up the badvaddr, context and entryhi registers for the guest */
2094        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2095        /* XXXKYMA: is the context register used by linux??? */
2096        kvm_write_c0_guest_entryhi(cop0, entryhi);
2097        /* Blow away the shadow host TLBs */
2098        kvm_mips_flush_host_tlb(1);
2099
2100        return EMULATE_DONE;
2101}
2102
2103enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2104                                               u32 *opc,
2105                                               struct kvm_run *run,
2106                                               struct kvm_vcpu *vcpu)
2107{
2108        struct mips_coproc *cop0 = vcpu->arch.cop0;
2109        struct kvm_vcpu_arch *arch = &vcpu->arch;
2110
2111        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2112                /* save old pc */
2113                kvm_write_c0_guest_epc(cop0, arch->pc);
2114                kvm_set_c0_guest_status(cop0, ST0_EXL);
2115
2116                if (cause & CAUSEF_BD)
2117                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2118                else
2119                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2120
2121        }
2122
2123        arch->pc = KVM_GUEST_KSEG0 + 0x180;
2124
2125        kvm_change_c0_guest_cause(cop0, (0xff),
2126                                  (EXCCODE_CPU << CAUSEB_EXCCODE));
2127        kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2128
2129        return EMULATE_DONE;
2130}
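
/*
 * Note that the Coprocessor Unusable delivery above also writes
 * Cause.CE = 1, so the guest attributes the fault to coprocessor 1
 * (the FPU) rather than CP0 or CP2.
 */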
2131
2132enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2133                                              u32 *opc,
2134                                              struct kvm_run *run,
2135                                              struct kvm_vcpu *vcpu)
2136{
2137        struct mips_coproc *cop0 = vcpu->arch.cop0;
2138        struct kvm_vcpu_arch *arch = &vcpu->arch;
2139        enum emulation_result er = EMULATE_DONE;
2140
2141        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2142                /* save old pc */
2143                kvm_write_c0_guest_epc(cop0, arch->pc);
2144                kvm_set_c0_guest_status(cop0, ST0_EXL);
2145
2146                if (cause & CAUSEF_BD)
2147                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2148                else
2149                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2150
2151                kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2152
2153                kvm_change_c0_guest_cause(cop0, (0xff),
2154                                          (EXCCODE_RI << CAUSEB_EXCCODE));
2155
2156                /* Set PC to the exception entry point */
2157                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2158
2159        } else {
2160                kvm_err("Trying to deliver RI when EXL is already set\n");
2161                er = EMULATE_FAIL;
2162        }
2163
2164        return er;
2165}
2166
2167enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2168                                              u32 *opc,
2169                                              struct kvm_run *run,
2170                                              struct kvm_vcpu *vcpu)
2171{
2172        struct mips_coproc *cop0 = vcpu->arch.cop0;
2173        struct kvm_vcpu_arch *arch = &vcpu->arch;
2174        enum emulation_result er = EMULATE_DONE;
2175
2176        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2177                /* save old pc */
2178                kvm_write_c0_guest_epc(cop0, arch->pc);
2179                kvm_set_c0_guest_status(cop0, ST0_EXL);
2180
2181                if (cause & CAUSEF_BD)
2182                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2183                else
2184                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2185
2186                kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2187
2188                kvm_change_c0_guest_cause(cop0, (0xff),
2189                                          (EXCCODE_BP << CAUSEB_EXCCODE));
2190
2191                /* Set PC to the exception entry point */
2192                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2193
2194        } else {
2195                kvm_err("Trying to deliver BP when EXL is already set\n");
2196                er = EMULATE_FAIL;
2197        }
2198
2199        return er;
2200}
2201
2202enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2203                                                u32 *opc,
2204                                                struct kvm_run *run,
2205                                                struct kvm_vcpu *vcpu)
2206{
2207        struct mips_coproc *cop0 = vcpu->arch.cop0;
2208        struct kvm_vcpu_arch *arch = &vcpu->arch;
2209        enum emulation_result er = EMULATE_DONE;
2210
2211        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2212                /* save old pc */
2213                kvm_write_c0_guest_epc(cop0, arch->pc);
2214                kvm_set_c0_guest_status(cop0, ST0_EXL);
2215
2216                if (cause & CAUSEF_BD)
2217                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2218                else
2219                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2220
2221                kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2222
2223                kvm_change_c0_guest_cause(cop0, (0xff),
2224                                          (EXCCODE_TR << CAUSEB_EXCCODE));
2225
2226                /* Set PC to the exception entry point */
2227                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2228
2229        } else {
2230                kvm_err("Trying to deliver TRAP when EXL is already set\n");
2231                er = EMULATE_FAIL;
2232        }
2233
2234        return er;
2235}
2236
2237enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2238                                                  u32 *opc,
2239                                                  struct kvm_run *run,
2240                                                  struct kvm_vcpu *vcpu)
2241{
2242        struct mips_coproc *cop0 = vcpu->arch.cop0;
2243        struct kvm_vcpu_arch *arch = &vcpu->arch;
2244        enum emulation_result er = EMULATE_DONE;
2245
2246        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2247                /* save old pc */
2248                kvm_write_c0_guest_epc(cop0, arch->pc);
2249                kvm_set_c0_guest_status(cop0, ST0_EXL);
2250
2251                if (cause & CAUSEF_BD)
2252                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2253                else
2254                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2255
2256                kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2257
2258                kvm_change_c0_guest_cause(cop0, (0xff),
2259                                          (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2260
2261                /* Set PC to the exception entry point */
2262                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2263
2264        } else {
2265                kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2266                er = EMULATE_FAIL;
2267        }
2268
2269        return er;
2270}
2271
2272enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2273                                               u32 *opc,
2274                                               struct kvm_run *run,
2275                                               struct kvm_vcpu *vcpu)
2276{
2277        struct mips_coproc *cop0 = vcpu->arch.cop0;
2278        struct kvm_vcpu_arch *arch = &vcpu->arch;
2279        enum emulation_result er = EMULATE_DONE;
2280
2281        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2282                /* save old pc */
2283                kvm_write_c0_guest_epc(cop0, arch->pc);
2284                kvm_set_c0_guest_status(cop0, ST0_EXL);
2285
2286                if (cause & CAUSEF_BD)
2287                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2288                else
2289                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2290
2291                kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2292
2293                kvm_change_c0_guest_cause(cop0, (0xff),
2294                                          (EXCCODE_FPE << CAUSEB_EXCCODE));
2295
2296                /* Set PC to the exception entry point */
2297                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2298
2299        } else {
2300                kvm_err("Trying to deliver FPE when EXL is already set\n");
2301                er = EMULATE_FAIL;
2302        }
2303
2304        return er;
2305}
2306
2307enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2308                                                  u32 *opc,
2309                                                  struct kvm_run *run,
2310                                                  struct kvm_vcpu *vcpu)
2311{
2312        struct mips_coproc *cop0 = vcpu->arch.cop0;
2313        struct kvm_vcpu_arch *arch = &vcpu->arch;
2314        enum emulation_result er = EMULATE_DONE;
2315
2316        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2317                /* save old pc */
2318                kvm_write_c0_guest_epc(cop0, arch->pc);
2319                kvm_set_c0_guest_status(cop0, ST0_EXL);
2320
2321                if (cause & CAUSEF_BD)
2322                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2323                else
2324                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2325
2326                kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2327
2328                kvm_change_c0_guest_cause(cop0, (0xff),
2329                                          (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2330
2331                /* Set PC to the exception entry point */
2332                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2333
2334        } else {
2335                kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2336                er = EMULATE_FAIL;
2337        }
2338
2339        return er;
2340}
2341
2342enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2343                                         struct kvm_run *run,
2344                                         struct kvm_vcpu *vcpu)
2345{
2346        struct mips_coproc *cop0 = vcpu->arch.cop0;
2347        struct kvm_vcpu_arch *arch = &vcpu->arch;
2348        enum emulation_result er = EMULATE_DONE;
2349        unsigned long curr_pc;
2350        union mips_instruction inst;
2351
2352        /*
2353         * Update PC and hold onto current PC in case there is
2354         * an error and we want to roll back the PC
2355         */
2356        curr_pc = vcpu->arch.pc;
2357        er = update_pc(vcpu, cause);
2358        if (er == EMULATE_FAIL)
2359                return er;
2360
2361        /* Fetch the instruction. */
2362        if (cause & CAUSEF_BD)
2363                opc += 1;
2364
2365        inst.word = kvm_get_inst(opc, vcpu);
2366
2367        if (inst.word == KVM_INVALID_INST) {
2368                kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2369                return EMULATE_FAIL;
2370        }
2371
2372        if (inst.r_format.opcode == spec3_op &&
2373            inst.r_format.func == rdhwr_op &&
2374            inst.r_format.rs == 0 &&
2375            (inst.r_format.re >> 3) == 0) {
2376                int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2377                int rd = inst.r_format.rd;
2378                int rt = inst.r_format.rt;
2379                int sel = inst.r_format.re & 0x7;
2380
2381                /* If usermode, check RDHWR rd is allowed by guest HWREna */
2382                if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2383                        kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2384                                  rd, opc);
2385                        goto emulate_ri;
2386                }
2387                switch (rd) {
2388                case MIPS_HWR_CPUNUM:           /* CPU number */
2389                        arch->gprs[rt] = vcpu->vcpu_id;
2390                        break;
2391                case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
2392                        arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2393                                             current_cpu_data.icache.linesz);
2394                        break;
2395                case MIPS_HWR_CC:               /* Read count register */
2396                        arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2397                        break;
2398                case MIPS_HWR_CCRES:            /* Count register resolution */
2399                        switch (current_cpu_data.cputype) {
2400                        case CPU_20KC:
2401                        case CPU_25KF:
2402                                arch->gprs[rt] = 1;
2403                                break;
2404                        default:
2405                                arch->gprs[rt] = 2;
2406                        }
2407                        break;
2408                case MIPS_HWR_ULR:              /* Read UserLocal register */
2409                        arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2410                        break;
2411
2412                default:
2413                        kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2414                        goto emulate_ri;
2415                }
2416
2417                trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2418                              vcpu->arch.gprs[rt]);
2419        } else {
2420                kvm_debug("Emulate RI not supported @ %p: %#x\n",
2421                          opc, inst.word);
2422                goto emulate_ri;
2423        }
2424
2425        return EMULATE_DONE;
2426
2427emulate_ri:
2428        /*
2429         * Roll back the PC (if in a branch delay slot the PC already points
2430         * to the branch target), and pass the RI exception to the guest OS.
2431         */
2432        vcpu->arch.pc = curr_pc;
2433        return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2434}
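
/*
 * RDHWR emulation matters mostly for guest userland TLS: Linux libc
 * typically reads the thread pointer with "rdhwr $3, $29"
 * (MIPS_HWR_ULR), which traps as RI when the guest's HWREna.ULR bit is
 * clear and is satisfied above from the guest UserLocal register. A
 * guest probing an unsupported hardware register simply has the RI
 * reflected back to it.
 */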
2435
2436enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2437                                                  struct kvm_run *run)
2438{
2439        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2440        enum emulation_result er = EMULATE_DONE;
2441
2442        if (run->mmio.len > sizeof(*gpr)) {
2443                kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2444                er = EMULATE_FAIL;
2445                goto done;
2446        }
2447
2448        er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2449        if (er == EMULATE_FAIL)
2450                return er;
2451
2452        switch (run->mmio.len) {
2453        case 4:
2454                *gpr = *(s32 *) run->mmio.data;
2455                break;
2456
2457        case 2:
2458                if (vcpu->mmio_needed == 2)
2459                        *gpr = *(s16 *) run->mmio.data;
2460                else
2461                        *gpr = *(u16 *)run->mmio.data;
2462
2463                break;
2464        case 1:
2465                if (vcpu->mmio_needed == 2)
2466                        *gpr = *(s8 *) run->mmio.data;
2467                else
2468                        *gpr = *(u8 *) run->mmio.data;
2469                break;
2470        }
2471
2472        if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2473                kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2474                          vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2475                          vcpu->mmio_needed);
2476
2477done:
2478        return er;
2479}
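
/*
 * Completion is deferred until userspace re-enters the vcpu, so
 * update_pc() runs here with the Cause word saved at fault time
 * (pending_load_cause); a load sitting in a branch delay slot therefore
 * still advances the PC to the branch target once the data arrives.
 */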
2480
2481static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2482                                                  u32 *opc,
2483                                                  struct kvm_run *run,
2484                                                  struct kvm_vcpu *vcpu)
2485{
2486        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2487        struct mips_coproc *cop0 = vcpu->arch.cop0;
2488        struct kvm_vcpu_arch *arch = &vcpu->arch;
2489        enum emulation_result er = EMULATE_DONE;
2490
2491        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2492                /* save old pc */
2493                kvm_write_c0_guest_epc(cop0, arch->pc);
2494                kvm_set_c0_guest_status(cop0, ST0_EXL);
2495
2496                if (cause & CAUSEF_BD)
2497                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2498                else
2499                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2500
2501                kvm_change_c0_guest_cause(cop0, (0xff),
2502                                          (exccode << CAUSEB_EXCCODE));
2503
2504                /* Set PC to the exception entry point */
2505                arch->pc = KVM_GUEST_KSEG0 + 0x180;
2506                kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2507
2508                kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2509                          exccode, kvm_read_c0_guest_epc(cop0),
2510                          kvm_read_c0_guest_badvaddr(cop0));
2511        } else {
2512                kvm_err("Trying to deliver EXC when EXL is already set\n");
2513                er = EMULATE_FAIL;
2514        }
2515
2516        return er;
2517}
2518
2519enum emulation_result kvm_mips_check_privilege(u32 cause,
2520                                               u32 *opc,
2521                                               struct kvm_run *run,
2522                                               struct kvm_vcpu *vcpu)
2523{
2524        enum emulation_result er = EMULATE_DONE;
2525        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2526        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2527
2528        int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2529
2530        if (usermode) {
2531                switch (exccode) {
2532                case EXCCODE_INT:
2533                case EXCCODE_SYS:
2534                case EXCCODE_BP:
2535                case EXCCODE_RI:
2536                case EXCCODE_TR:
2537                case EXCCODE_MSAFPE:
2538                case EXCCODE_FPE:
2539                case EXCCODE_MSADIS:
2540                        break;
2541
2542                case EXCCODE_CPU:
2543                        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2544                                er = EMULATE_PRIV_FAIL;
2545                        break;
2546
2547                case EXCCODE_MOD:
2548                        break;
2549
2550                case EXCCODE_TLBL:
2551                        /*
2552                         * If we are accessing Guest kernel space, send an
2553                         * address error exception to the guest
2554                         */
2555                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2556                                kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2557                                          badvaddr);
2558                                cause &= ~0xff;
2559                                cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2560                                er = EMULATE_PRIV_FAIL;
2561                        }
2562                        break;
2563
2564                case EXCCODE_TLBS:
2565                        /*
2566                         * If we are accessing Guest kernel space, send an
2567                         * address error exception to the guest
2568                         */
2569                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2570                                kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2571                                          badvaddr);
2572                                cause &= ~0xff;
2573                                cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2574                                er = EMULATE_PRIV_FAIL;
2575                        }
2576                        break;
2577
2578                case EXCCODE_ADES:
2579                        kvm_debug("%s: address error ST @ %#lx\n", __func__,
2580                                  badvaddr);
2581                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2582                                cause &= ~0xff;
2583                                cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2584                        }
2585                        er = EMULATE_PRIV_FAIL;
2586                        break;
2587                case EXCCODE_ADEL:
2588                        kvm_debug("%s: address error LD @ %#lx\n", __func__,
2589                                  badvaddr);
2590                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2591                                cause &= ~0xff;
2592                                cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2593                        }
2594                        er = EMULATE_PRIV_FAIL;
2595                        break;
2596                default:
2597                        er = EMULATE_PRIV_FAIL;
2598                        break;
2599                }
2600        }
2601
2602        if (er == EMULATE_PRIV_FAIL)
2603                kvm_mips_emulate_exc(cause, opc, run, vcpu);
2604
2605        return er;
2606}
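
/*
 * The TLBL/TLBS remapping above enforces the guest privilege model: a
 * guest-user access at or above KVM_GUEST_KSEG0 must not be treated as
 * an ordinary refill, so it is reflected to the guest as an address
 * error (AdEL/AdES) instead, just as real hardware would report a
 * user-mode access to kernel segments.
 */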
2607
2608/*
2609 * User Address (UA) fault: this can happen if
2610 * (1) the TLB entry is not present/valid in both the Guest and shadow host
2611 *     TLBs, in which case we pass the fault on to the guest kernel;
2612 * (2) the TLB entry is present in the Guest TLB but not in the shadow, in
2613 *     which case we inject the entry from the Guest TLB into the shadow host TLB
2614 */
2615enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2616                                              u32 *opc,
2617                                              struct kvm_run *run,
2618                                              struct kvm_vcpu *vcpu)
2619{
2620        enum emulation_result er = EMULATE_DONE;
2621        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2622        unsigned long va = vcpu->arch.host_cp0_badvaddr;
2623        int index;
2624
2625        kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
2626                  vcpu->arch.host_cp0_badvaddr);
2627
2628        /*
2629         * KVM would not have got the exception if this entry was valid in the
2630         * shadow host TLB. Check the Guest TLB, if the entry is not there then
2631         * send the guest an exception. The guest exc handler should then inject
2632         * an entry into the guest TLB.
2633         */
2634        index = kvm_mips_guest_tlb_lookup(vcpu,
2635                      (va & VPN2_MASK) |
2636                      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2637                       KVM_ENTRYHI_ASID));
2638        if (index < 0) {
2639                if (exccode == EXCCODE_TLBL) {
2640                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2641                } else if (exccode == EXCCODE_TLBS) {
2642                        er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2643                } else {
2644                        kvm_err("%s: invalid exc code: %d\n", __func__,
2645                                exccode);
2646                        er = EMULATE_FAIL;
2647                }
2648        } else {
2649                struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2650
2651                /*
2652                 * Check if the entry is valid; if not, set up a TLB invalid
2653                 * exception for the guest
2654                 */
2655                if (!TLB_IS_VALID(*tlb, va)) {
2656                        if (exccode == EXCCODE_TLBL) {
2657                                er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2658                                                                vcpu);
2659                        } else if (exccode == EXCCODE_TLBS) {
2660                                er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2661                                                                vcpu);
2662                        } else {
2663                                kvm_err("%s: invalid exc code: %d\n", __func__,
2664                                        exccode);
2665                                er = EMULATE_FAIL;
2666                        }
2667                } else {
2668                        kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2669                                  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2670                        /*
2671                         * OK we have a Guest TLB entry, now inject it into the
2672                         * shadow host TLB
2673                         */
2674                        if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
2675                                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2676                                        __func__, va, index, vcpu,
2677                                        read_c0_entryhi());
2678                                er = EMULATE_FAIL;
2679                        }
2680                }
2681        }
2682
2683        return er;
2684}
2685