linux/arch/mips/kvm/emulate.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

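/*
 * asm/r4kcache.h emits the CONFIG_MIPS_MT (multithreading-safe) variants of
 * the cache ops when that symbol is defined; the #undef/#define pair below
 * appears intended to pull in the plain uniprocessor versions for KVM's own
 * cache maintenance.
 */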
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address, emulating the branch if required.
 * This function should only be called when a branch delay slot is active.
 */
static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
                                  unsigned long *out)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc;
        int err;

        if (epc & 3) {
                kvm_err("%s: unaligned epc\n", __func__);
                return -EINVAL;
        }

        /* Read the instruction */
        err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
        if (err)
                return err;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                default:
                        return -EINVAL;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
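
                        /*
                         * Branch targets here and below follow the MIPS rule:
                         * delay-slot address plus the sign-extended 16-bit
                         * offset scaled by 4, i.e. epc + 4 + (simmediate << 2).
                         * E.g. the "b ." idiom encodes simmediate = -1, giving
                         * epc + 4 - 4 = epc, a branch back to itself.
                         */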

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                case bposge32_op:
                        if (!cpu_has_dsp) {
                                kvm_err("%s: DSP branch but not DSP ASE\n",
                                        __func__);
                                return -EINVAL;
                        }

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                default:
                        return -EINVAL;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
                /* fall through */
        case j_op:
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;
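
                /*
                 * J/JAL targets stay within the 256MB region of the delay
                 * slot: the >> 28 / << 28 pair keeps the top 4 bits of
                 * epc + 4, and the 26-bit target field, scaled by 4, supplies
                 * the low 28 bits.
                 */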

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* POP06 */
#ifndef CONFIG_CPU_MIPSR6
        case blezl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:   /* POP07 */
#ifndef CONFIG_CPU_MIPSR6
        case bgtzl_op:  /* removed in R6 */
#endif
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                return -EINVAL;

#ifdef CONFIG_CPU_MIPSR6
        /* R6 added the following compact branches with forbidden slots */
        case blezl_op:  /* POP26 */
        case bgtzl_op:  /* POP27 */
                /* only rt == 0 isn't compact branch */
                if (insn.i_format.rt != 0)
                        goto compact_branch;
                return -EINVAL;
        case pop10_op:
        case pop30_op:
                /* only rs == rt == 0 is reserved, rest are compact branches */
                if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
                        goto compact_branch;
                return -EINVAL;
        case pop66_op:
        case pop76_op:
                /* only rs == 0 isn't compact branch */
                if (insn.i_format.rs != 0)
                        goto compact_branch;
                return -EINVAL;
compact_branch:
                /*
                 * If we've hit an exception on the forbidden slot, then
                 * the branch must not have been taken.
                 */
                epc += 8;
                nextpc = epc;
                break;
#else
compact_branch:
                /* Fall through - Compact branches not supported before R6 */
#endif
        default:
                return -EINVAL;
        }

        *out = nextpc;
        return 0;
}

enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
        int err;

        if (cause & CAUSEF_BD) {
                err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
                                             &vcpu->arch.pc);
                if (err)
                        return EMULATE_FAIL;
        } else {
                vcpu->arch.pc += 4;
        }

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return EMULATE_DONE;
}

/**
 * kvm_get_badinstr() - Get bad instruction encoding.
 * @opc:        Guest pointer to faulting instruction.
 * @vcpu:       KVM VCPU information.
 * @out:        Output pointer for the instruction encoding.
 *
 * Gets the instruction encoding of the faulting instruction, using the saved
 * BadInstr register value if it exists, otherwise falling back to reading guest
 * memory at @opc.
 *
 * Returns:     0 on success, otherwise an error code passed through from
 *              kvm_get_inst().
 */
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
        if (cpu_has_badinstr) {
                *out = vcpu->arch.host_cp0_badinstr;
                return 0;
        } else {
                return kvm_get_inst(opc, vcpu, out);
        }
}

/**
 * kvm_get_badinstrp() - Get bad prior instruction encoding.
 * @opc:        Guest pointer to prior faulting instruction.
 * @vcpu:       KVM VCPU information.
 * @out:        Output pointer for the instruction encoding.
 *
 * Gets the instruction encoding of the prior faulting instruction (the branch
 * containing the delay slot which faulted), using the saved BadInstrP register
 * value if it exists, otherwise falling back to reading guest memory at @opc.
 *
 * Returns:     0 on success, otherwise an error code passed through from
 *              kvm_get_inst().
 */
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
        if (cpu_has_badinstrp) {
                *out = vcpu->arch.host_cp0_badinstrp;
                return 0;
        } else {
                return kvm_get_inst(opc, vcpu, out);
        }
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     1 if the CP0_Count timer is disabled by either the guest
 *              CP0_Cause.DC bit or the count_ctl.DC bit.
 *              0 otherwise (in which case CP0_Count timer is running).
 */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta*count_hz will never overflow since
         * at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
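
/*
 * Worked example for the bound above: with count_hz = 100 MHz, count_period
 * is 2^32 * NSEC_PER_SEC / count_hz, roughly 42.95 s, so delta * count_hz
 * peaks at NSEC_PER_SEC * 2^32 and fits comfortably in 64 bits. A 10 ms
 * delta then scales to 10,000,000 * 100,000,000 / 10^9 = 1,000,000 Count
 * ticks.
 */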

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:       Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
 * except when the master disable bit is set in count_ctl, in which case it is
 * count_resume, i.e. the time that the count was disabled.
 *
 * Returns:     Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles any
 * timer interrupt which has become pending but hasn't yet been handled.
 *
 * Returns:     The current value of the guest CP0_Count register.
 */
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t expires, threshold;
        u32 count, compare;
        int running;

        /* Calculate the biased and scaled guest CP0_Count */
        count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
        compare = kvm_read_c0_guest_compare(cop0);

        /*
         * Find whether CP0_Count has reached the closest timer interrupt. If
         * not, we shouldn't inject it.
         */
        if ((s32)(count - compare) < 0)
                return count;
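
        /*
         * The signed difference makes the comparison robust to CP0_Count
         * wrap-around: e.g. count = 0x00000010 with compare = 0xfffffff0
         * gives a small positive difference (0x20), so count is treated as
         * having recently passed compare rather than trailing far behind it.
         */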

        /*
         * The CP0_Count we're going to return has already reached the closest
         * timer interrupt. Quickly check if it really is a new interrupt by
         * looking at whether the interval until the hrtimer expiry time is
         * less than 1/4 of the timer period.
         */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
        if (ktime_before(expires, threshold)) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        return count;
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:       Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the timer
 * is stopped.
 *
 * Returns:     The current guest CP0_Count value.
 */
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:       Virtual CPU.
 * @count:      Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:     The ktime at the point of freeze.
 */
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:       Virtual CPU.
 * @now:        ktime at point of resume.
 * @count:      CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
        compare = kvm_read_c0_guest_compare(cop0);
        delta = (u64)(u32)(compare - count - 1) + 1;
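        /*
         * E.g. compare == count yields (u32)-1 + 1 = 2^32 ticks, i.e. a
         * full Count period, rather than an immediate expiry.
         */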
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:       Virtual CPU.
 * @before:     Time before Count was saved, lower bound of drift calculation.
 * @count:      CP0_Count at point of restore.
 * @min_drift:  Minimum amount of drift permitted before correction.
 *              Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
 * is to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:     Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
                             u32 count, int min_drift)
{
        ktime_t now, count_time;
        u32 now_count, before_count;
        u64 delta;
        int drift, ret = 0;

        /* Calculate expected count at before */
        before_count = vcpu->arch.count_bias +
                        kvm_mips_ktime_to_count(vcpu, before);

        /*
         * Detect significantly negative drift, where count is lower than
         * expected. Some negative drift is expected when the hardware counter
         * is set after kvm_mips_freeze_hrtimer(), and it is harmless to allow
         * the time to jump forwards a little, within reason. If the drift is
         * too significant, adjust the bias to avoid a big Guest.CP0_Count jump.
         */
        drift = count - before_count;
        if (drift < min_drift) {
                count_time = before;
                vcpu->arch.count_bias += drift;
                ret = drift;
                goto resume;
        }

        /* Calculate expected count right now */
        now = ktime_get();
        now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

        /*
         * Detect positive drift, where count is higher than expected, and
         * adjust the bias to avoid guest time going backwards.
         */
        drift = count - now_count;
        if (drift > 0) {
                count_time = now;
                vcpu->arch.count_bias += drift;
                ret = drift;
                goto resume;
        }

        /* Subtract nanosecond delta to find ktime when count was read */
        delta = (u64)(u32)(now_count - count);
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        count_time = ktime_sub_ns(now, delta);

resume:
        /* Resume using the calculated ktime */
        kvm_mips_resume_hrtimer(vcpu, count_time, count);
        return ret;
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:       Virtual CPU.
 * @count:      Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of timer.
 *
 * Initialise the timer to the specified frequency, zero it, and set it going if
 * it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
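        /*
         * count_period is the time for CP0_Count to wrap: e.g. at
         * count_hz = 200 MHz, 2^32 * NSEC_PER_SEC / count_hz is roughly
         * 21.47 seconds.
         */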
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at 0 */
        kvm_mips_write_count(vcpu, 0);
}

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:     -EINVAL if @count_hz is out of range.
 *              0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:       Virtual CPU.
 * @compare:    New CP0_Compare value.
 * @ack:        Whether to acknowledge timer interrupt.
 *
 * Update CP0_Compare to a new value and update the timeout.
 * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
 * any pending timer interrupt is preserved.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        u32 old_compare = kvm_read_c0_guest_compare(cop0);
        s32 delta = compare - old_compare;
        u32 cause;
        ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
        u32 count;

        /* if unchanged, must just be an ack */
        if (old_compare == compare) {
                if (!ack)
                        return;
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                kvm_write_c0_guest_compare(cop0, compare);
                return;
        }

        /*
         * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
         * too to prevent guest CP0_Count hitting guest CP0_Compare.
         *
         * The new GTOffset corresponds to the new value of CP0_Compare, and is
         * set prior to it being written into the guest context. We disable
         * preemption until the new value is written to prevent restore of a
         * GTOffset corresponding to the old CP0_Compare value.
         */
        if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
                preempt_disable();
                write_c0_gtoffset(compare - read_c0_count());
                back_to_back_c0_hazard();
        }

        /* freeze_hrtimer() takes care of timer interrupts <= count */
        dc = kvm_mips_count_disabled(vcpu);
        if (!dc)
                now = kvm_mips_freeze_hrtimer(vcpu, &count);

        if (ack)
                kvm_mips_callbacks->dequeue_timer_int(vcpu);
        else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
                /*
                 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
                 * preserve guest CP0_Cause.TI if we don't want to ack it.
                 */
                cause = kvm_read_c0_guest_cause(cop0);

        kvm_write_c0_guest_compare(cop0, compare);

        if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
                if (delta > 0)
                        preempt_enable();

                back_to_back_c0_hazard();

                if (!ack && cause & CAUSEF_TI)
                        kvm_write_c0_guest_cause(cop0, cause);
        }

        /* resume_hrtimer() takes care of timer interrupts > count */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);

        /*
         * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
         * until after the new CP0_Compare is written, otherwise new guest
         * CP0_Count could hit new guest CP0_Compare.
         */
        if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
                write_c0_gtoffset(compare - read_c0_count());
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:     The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:       Virtual CPU.
 * @count_ctl:  Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:     -EINVAL if reserved bits are set.
 *              0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        u32 count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
                        delta = (u64)(u32)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:               Virtual CPU.
 * @count_resume:       Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:     -EINVAL if out of valid range (0..now).
 *              0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:       Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward a period.
 *
 * Returns:     The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
        if (!vcpu->arch.pending_exceptions) {
                kvm_vz_lose_htimer(vcpu);
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return EMULATE_DONE;
}

static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
                                    unsigned long entryhi)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu, i;
        u32 nasid = entryhi & KVM_ENTRYHI_ASID;

        if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
                trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
                                      KVM_ENTRYHI_ASID, nasid);

                /*
                 * Flush entries from the GVA page tables.
                 * Guest user page table will get flushed lazily on re-entry to
                 * guest user if the guest ASID actually changes.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);

                /*
                 * Regenerate/invalidate kernel MMU context.
                 * The user MMU context will be regenerated lazily on re-entry
                 * to guest user if the guest ASID actually changes.
                 */
                preempt_disable();
                cpu = smp_processor_id();
                get_new_mmu_context(kern_mm, cpu);
                for_each_possible_cpu(i)
                        if (i != cpu)
                                cpu_context(i, kern_mm) = 0;
                preempt_enable();
        }
        kvm_write_c0_guest_entryhi(cop0, entryhi);
}

enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb;
        unsigned long pc = vcpu->arch.pc;
        int index;

        index = kvm_read_c0_guest_index(cop0);
        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                /* UNDEFINED */
                kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
                index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
        }

        tlb = &vcpu->arch.guest_tlb[index];
        kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
        kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
        kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
        kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);

        return EMULATE_DONE;
}

/**
 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
 * @vcpu:       VCPU with changed mappings.
 * @tlb:        TLB entry being removed.
 *
 * This is called to indicate a single change in guest MMU mappings, so that we
 * can arrange TLB flushes on this and other CPUs.
 */
static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
                                          struct kvm_mips_tlb *tlb)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu, i;
        bool user;

        /* No need to flush for entries which are already invalid */
        if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
                return;
        /* Don't touch host kernel page tables or TLB mappings */
        if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
                return;
        /* User address space doesn't need flushing for KSeg2/3 changes */
        user = tlb->tlb_hi < KVM_GUEST_KSEG0;

        preempt_disable();

        /* Invalidate page table entries */
        kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);

        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);

        /* Invalidate the whole ASID on other CPUs */
        cpu = smp_processor_id();
        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;
                if (user)
                        cpu_context(i, user_mm) = 0;
                cpu_context(i, kern_mm) = 0;
        }

        preempt_enable();
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
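                /*
                 * The sign bit is cleared before the modulo so a negative
                 * index can't produce a negative remainder; the result always
                 * lands inside the guest TLB array.
                 */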
        }

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb = NULL;
        unsigned long pc = vcpu->arch.pc;
        int index;

        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        tlb = &vcpu->arch.guest_tlb[index];

        kvm_mips_invalidate_guest_tlb(vcpu, tlb);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        unsigned long pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config4 and ULRI are optional */
        unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
        /* Config5 is optional */
        unsigned int mask = MIPS_CONF_M;

        /* KScrExist */
        mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
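        /*
         * 0xfc sets KScrExist bits 2-7, advertising the KScratch registers
         * at CP0 Register 31, Selects 2 and up; bits 0-1 (Selects 0-1, which
         * overlap other uses of Register 31 such as DESAVE) are left clear.
         */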

        return mask;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:       Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = 0;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE;
                /* We don't support UFR or UFE */
        }

        return mask;
}

enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                           u32 *opc, u32 cause,
                                           struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to roll back the PC
         */
1278        curr_pc = vcpu->arch.pc;
1279        er = update_pc(vcpu, cause);
1280        if (er == EMULATE_FAIL)
1281                return er;
1282
1283        if (inst.co_format.co) {
1284                switch (inst.co_format.func) {
1285                case tlbr_op:   /*  Read indexed TLB entry  */
1286                        er = kvm_mips_emul_tlbr(vcpu);
1287                        break;
1288                case tlbwi_op:  /*  Write indexed  */
1289                        er = kvm_mips_emul_tlbwi(vcpu);
1290                        break;
1291                case tlbwr_op:  /*  Write random  */
1292                        er = kvm_mips_emul_tlbwr(vcpu);
1293                        break;
1294                case tlbp_op:   /* TLB Probe */
1295                        er = kvm_mips_emul_tlbp(vcpu);
1296                        break;
1297                case rfe_op:
1298                        kvm_err("!!!COP0_RFE!!!\n");
1299                        break;
1300                case eret_op:
1301                        er = kvm_mips_emul_eret(vcpu);
1302                        goto dont_update_pc;
1303                case wait_op:
1304                        er = kvm_mips_emul_wait(vcpu);
1305                        break;
1306                case hypcall_op:
1307                        er = kvm_mips_emul_hypcall(vcpu, inst);
1308                        break;
1309                }
1310        } else {
1311                rt = inst.c0r_format.rt;
1312                rd = inst.c0r_format.rd;
1313                sel = inst.c0r_format.sel;
1314
1315                switch (inst.c0r_format.rs) {
1316                case mfc_op:
1317#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1318                        cop0->stat[rd][sel]++;
1319#endif
1320                        /* Get reg */
1321                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1322                                vcpu->arch.gprs[rt] =
1323                                    (s32)kvm_mips_read_count(vcpu);
1324                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
1325                                vcpu->arch.gprs[rt] = 0x0;
1326#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1327                                kvm_mips_trans_mfc0(inst, opc, vcpu);
1328#endif
1329                        } else {
1330                                vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
1331
1332#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1333                                kvm_mips_trans_mfc0(inst, opc, vcpu);
1334#endif
1335                        }
1336
1337                        trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
1338                                      KVM_TRACE_COP0(rd, sel),
1339                                      vcpu->arch.gprs[rt]);
1340                        break;
1341
1342                case dmfc_op:
1343                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
1344
1345                        trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
1346                                      KVM_TRACE_COP0(rd, sel),
1347                                      vcpu->arch.gprs[rt]);
1348                        break;
1349
1350                case mtc_op:
1351#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
1352                        cop0->stat[rd][sel]++;
1353#endif
1354                        trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
1355                                      KVM_TRACE_COP0(rd, sel),
1356                                      vcpu->arch.gprs[rt]);
1357
1358                        if ((rd == MIPS_CP0_TLB_INDEX)
1359                            && (vcpu->arch.gprs[rt] >=
1360                                KVM_MIPS_GUEST_TLB_SIZE)) {
1361                                kvm_err("Invalid TLB Index: %ld",
1362                                        vcpu->arch.gprs[rt]);
1363                                er = EMULATE_FAIL;
1364                                break;
1365                        }
1366                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
1367                                /*
1368                                 * Preserve core number, and keep the exception
1369                                 * base in guest KSeg0.
1370                                 */
1371                                kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
1372                                                          vcpu->arch.gprs[rt]);
1373                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
1374                                kvm_mips_change_entryhi(vcpu,
1375                                                        vcpu->arch.gprs[rt]);
1376                        }
1377                        /* Are we writing to COUNT */
1378                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1379                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1380                                goto done;
1381                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1382                                /* If we are writing to COMPARE */
1383                                /* Clear pending timer interrupt, if any */
1384                                kvm_mips_write_compare(vcpu,
1385                                                       vcpu->arch.gprs[rt],
1386                                                       true);
1387                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1388                                unsigned int old_val, val, change;
1389
1390                                old_val = kvm_read_c0_guest_status(cop0);
1391                                val = vcpu->arch.gprs[rt];
1392                                change = val ^ old_val;
1393
1394                                /* Make sure that the NMI bit is never set */
1395                                val &= ~ST0_NMI;
1396
1397                                /*
1398                                 * Don't allow CU1 or FR to be set unless FPU
1399                                 * capability enabled and exists in guest
1400                                 * configuration.
1401                                 */
1402                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1403                                        val &= ~(ST0_CU1 | ST0_FR);
1404
1405                                /*
1406                                 * Also don't allow FR to be set if host doesn't
1407                                 * support it.
1408                                 */
1409                                if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
1410                                        val &= ~ST0_FR;
1411
1412
1413                                /* Handle changes in FPU mode */
1414                                preempt_disable();
1415
1416                                /*
1417                                 * FPU and Vector register state is made
1418                                 * UNPREDICTABLE by a change of FR, so don't
1419                                 * even bother saving it.
1420                                 */
1421                                if (change & ST0_FR)
1422                                        kvm_drop_fpu(vcpu);
1423
1424                                /*
1425                                 * If MSA state is already live, it is undefined
1426                                 * how it interacts with FR=0 FPU state, and we
1427                                 * don't want to hit reserved instruction
1428                                 * exceptions trying to save the MSA state later
1429                                 * when CU=1 && FR=1, so play it safe and save
1430                                 * it first.
1431                                 */
1432                                if (change & ST0_CU1 && !(val & ST0_FR) &&
1433                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1434                                        kvm_lose_fpu(vcpu);
1435
1436                                /*
1437                                 * Propagate CU1 (FPU enable) changes
1438                                 * immediately if the FPU context is already
1439                                 * loaded. When disabling we leave the context
1440                                 * loaded so it can be quickly enabled again in
1441                                 * the near future.
1442                                 */
1443                                if (change & ST0_CU1 &&
1444                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1445                                        change_c0_status(ST0_CU1, val);
1446
1447                                preempt_enable();
1448
1449                                kvm_write_c0_guest_status(cop0, val);
1450
1451#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1452                                /*
1453                                 * If an FPU is present, we need the CU1/FR
1454                                 * bits to take effect fairly soon.
1455                                 */
1456                                if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1457                                        kvm_mips_trans_mtc0(inst, opc, vcpu);
1458#endif
1459                        } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1460                                unsigned int old_val, val, change, wrmask;
1461
1462                                old_val = kvm_read_c0_guest_config5(cop0);
1463                                val = vcpu->arch.gprs[rt];
1464
1465                                /* Only a few bits are writable in Config5 */
1466                                wrmask = kvm_mips_config5_wrmask(vcpu);
1467                                change = (val ^ old_val) & wrmask;
1468                                val = old_val ^ change;
1469
1471                                /* Handle changes in FPU/MSA modes */
1472                                preempt_disable();
1473
1474                                /*
1475                                 * Propagate FRE changes immediately if the FPU
1476                                 * context is already loaded.
1477                                 */
1478                                if (change & MIPS_CONF5_FRE &&
1479                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1480                                        change_c0_config5(MIPS_CONF5_FRE, val);
1481
1482                                /*
1483                                 * Propagate MSAEn changes immediately if the
1484                                 * MSA context is already loaded. When disabling
1485                                 * we leave the context loaded so it can be
1486                                 * quickly enabled again in the near future.
1487                                 */
1488                                if (change & MIPS_CONF5_MSAEN &&
1489                                    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1490                                        change_c0_config5(MIPS_CONF5_MSAEN,
1491                                                          val);
1492
1493                                preempt_enable();
1494
1495                                kvm_write_c0_guest_config5(cop0, val);
1496                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1497                                u32 old_cause, new_cause;
1498
1499                                old_cause = kvm_read_c0_guest_cause(cop0);
1500                                new_cause = vcpu->arch.gprs[rt];
1501                                /* Update R/W bits */
1502                                kvm_change_c0_guest_cause(cop0, 0x08800300,
1503                                                          new_cause);
1504                                /* DC bit enabling/disabling timer? */
1505                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
1506                                        if (new_cause & CAUSEF_DC)
1507                                                kvm_mips_count_disable_cause(vcpu);
1508                                        else
1509                                                kvm_mips_count_enable_cause(vcpu);
1510                                }
1511                        } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
1512                                u32 mask = MIPS_HWRENA_CPUNUM |
1513                                           MIPS_HWRENA_SYNCISTEP |
1514                                           MIPS_HWRENA_CC |
1515                                           MIPS_HWRENA_CCRES;
1516
1517                                if (kvm_read_c0_guest_config3(cop0) &
1518                                    MIPS_CONF3_ULRI)
1519                                        mask |= MIPS_HWRENA_ULR;
1520                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
1521                        } else {
1522                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1523#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1524                                kvm_mips_trans_mtc0(inst, opc, vcpu);
1525#endif
1526                        }
1527                        break;
1528
1529                case dmtc_op:
1530                        kvm_err("[%#lx] dmtc_op not supported: rt: %d, rd: %d, sel: %d\n",
1531                                vcpu->arch.pc, rt, rd, sel);
1532                        trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
1533                                      KVM_TRACE_COP0(rd, sel),
1534                                      vcpu->arch.gprs[rt]);
1535                        er = EMULATE_FAIL;
1536                        break;
1537
1538                case mfmc0_op:
1539#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1540                        cop0->stat[MIPS_CP0_STATUS][0]++;
1541#endif
1542                        if (rt != 0)
1543                                vcpu->arch.gprs[rt] =
1544                                    kvm_read_c0_guest_status(cop0);
1545                        /* The sc bit selects between EI (set) and DI (clear) */
1546                        if (inst.mfmc0_format.sc) {
1547                                kvm_debug("[%#lx] mfmc0_op: EI\n",
1548                                          vcpu->arch.pc);
1549                                kvm_set_c0_guest_status(cop0, ST0_IE);
1550                        } else {
1551                                kvm_debug("[%#lx] mfmc0_op: DI\n",
1552                                          vcpu->arch.pc);
1553                                kvm_clear_c0_guest_status(cop0, ST0_IE);
1554                        }
1555
1556                        break;
1557
1558                case wrpgpr_op:
1559                        {
1560                                u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1561                                u32 pss =
1562                                    (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1563                                /*
1564                                 * We don't support any shadow register sets,
1565                                 * so SRSCtl[PSS] == SRSCtl[CSS] == 0
1566                                 */
1567                                if (css || pss) {
1568                                        er = EMULATE_FAIL;
1569                                        break;
1570                                }
1571                                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1572                                          vcpu->arch.gprs[rt]);
1573                                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1574                        }
1575                        break;
1576                default:
1577                        kvm_err("[%#lx] MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1578                                vcpu->arch.pc, inst.c0r_format.rs);
1579                        er = EMULATE_FAIL;
1580                        break;
1581                }
1582        }
1583
1584done:
1585        /* Roll back the PC only if emulation was unsuccessful */
1586        if (er == EMULATE_FAIL)
1587                vcpu->arch.pc = curr_pc;
1588
1589dont_update_pc:
1590        /*
1591         * Branch here for special instructions whose emulation updates
1592         * the PC itself, so that it must not be overwritten under any
1593         * circumstances.
1594         */
1595
1596        return er;
1597}
1598
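/**
 * kvm_mips_emulate_store() - Emulate a guest store to an MMIO region.
 * @inst:       Store instruction that faulted.
 * @cause:      CP0_Cause of the exception, for branch delay slot handling.
 * @run:        KVM run structure whose mmio block is filled in.
 * @vcpu:       Virtual CPU performing the store.
 *
 * Translate the faulting GVA to a GPA, stash the register value and access
 * size in @run->mmio, and return EMULATE_DO_MMIO so userspace can complete
 * the access. The PC is rolled back on failure.
 */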
1599enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
1600                                             u32 cause,
1601                                             struct kvm_run *run,
1602                                             struct kvm_vcpu *vcpu)
1603{
1604        enum emulation_result er;
1605        u32 rt;
1606        void *data = run->mmio.data;
1607        unsigned long curr_pc;
1608
1609        /*
1610         * Update PC and hold onto current PC in case there is
1611         * an error and we want to roll back the PC.
1612         */
1613        curr_pc = vcpu->arch.pc;
1614        er = update_pc(vcpu, cause);
1615        if (er == EMULATE_FAIL)
1616                return er;
1617
1618        rt = inst.i_format.rt;
1619
1620        run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1621                                                vcpu->arch.host_cp0_badvaddr);
1622        if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1623                goto out_fail;
1624
1625        switch (inst.i_format.opcode) {
1626#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
1627        case sd_op:
1628                run->mmio.len = 8;
1629                *(u64 *)data = vcpu->arch.gprs[rt];
1630
1631                kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
1632                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1633                          vcpu->arch.gprs[rt], *(u64 *)data);
1634                break;
1635#endif
1636
1637        case sw_op:
1638                run->mmio.len = 4;
1639                *(u32 *)data = vcpu->arch.gprs[rt];
1640
1641                kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1642                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1643                          vcpu->arch.gprs[rt], *(u32 *)data);
1644                break;
1645
1646        case sh_op:
1647                run->mmio.len = 2;
1648                *(u16 *)data = vcpu->arch.gprs[rt];
1649
1650                kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1651                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1652                          vcpu->arch.gprs[rt], *(u16 *)data);
1653                break;
1654
1655        case sb_op:
1656                run->mmio.len = 1;
1657                *(u8 *)data = vcpu->arch.gprs[rt];
1658
1659                kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1660                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1661                          vcpu->arch.gprs[rt], *(u8 *)data);
1662                break;
1663
1664        default:
1665                kvm_err("Store not yet supported (inst=0x%08x)\n",
1666                        inst.word);
1667                goto out_fail;
1668        }
1669
1670        run->mmio.is_write = 1;
1671        vcpu->mmio_needed = 1;
1672        vcpu->mmio_is_write = 1;
1673        return EMULATE_DO_MMIO;
1674
1675out_fail:
1676        /* Roll back the PC if emulation was unsuccessful */
1677        vcpu->arch.pc = curr_pc;
1678        return EMULATE_FAIL;
1679}
1680
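/**
 * kvm_mips_emulate_load() - Emulate a guest load from an MMIO region.
 * @inst:       Load instruction that faulted.
 * @cause:      CP0_Cause of the exception, for branch delay slot handling.
 * @run:        KVM run structure whose mmio block is filled in.
 * @vcpu:       Virtual CPU performing the load.
 *
 * Record the destination GPR and resume PC, translate the faulting GVA to a
 * GPA, and set up @run->mmio with the access size and signedness so that
 * kvm_mips_complete_mmio_load() can finish the load once userspace has
 * performed the MMIO access.
 */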
1681enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
1682                                            u32 cause, struct kvm_run *run,
1683                                            struct kvm_vcpu *vcpu)
1684{
1685        enum emulation_result er;
1686        unsigned long curr_pc;
1687        u32 op, rt;
1688
1689        rt = inst.i_format.rt;
1690        op = inst.i_format.opcode;
1691
1692        /*
1693         * Find the resume PC now while we have safe and easy access to the
1694         * prior branch instruction, and save it for
1695         * kvm_mips_complete_mmio_load() to restore later.
1696         */
1697        curr_pc = vcpu->arch.pc;
1698        er = update_pc(vcpu, cause);
1699        if (er == EMULATE_FAIL)
1700                return er;
1701        vcpu->arch.io_pc = vcpu->arch.pc;
1702        vcpu->arch.pc = curr_pc;
1703
1704        vcpu->arch.io_gpr = rt;
1705
1706        run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
1707                                                vcpu->arch.host_cp0_badvaddr);
1708        if (run->mmio.phys_addr == KVM_INVALID_ADDR)
1709                return EMULATE_FAIL;
1710
1711        vcpu->mmio_needed = 2;  /* signed */
1712        switch (op) {
1713#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
1714        case ld_op:
1715                run->mmio.len = 8;
1716                break;
1717
1718        case lwu_op:
1719                vcpu->mmio_needed = 1;  /* unsigned */
1720                /* fall through */
1721#endif
1722        case lw_op:
1723                run->mmio.len = 4;
1724                break;
1725
1726        case lhu_op:
1727                vcpu->mmio_needed = 1;  /* unsigned */
1728                /* fall through */
1729        case lh_op:
1730                run->mmio.len = 2;
1731                break;
1732
1733        case lbu_op:
1734                vcpu->mmio_needed = 1;  /* unsigned */
1735                /* fall through */
1736        case lb_op:
1737                run->mmio.len = 1;
1738                break;
1739
1740        default:
1741                kvm_err("Load not yet supported (inst=0x%08x)\n",
1742                        inst.word);
1743                vcpu->mmio_needed = 0;
1744                return EMULATE_FAIL;
1745        }
1746
1747        run->mmio.is_write = 0;
1748        vcpu->mmio_is_write = 0;
1749        return EMULATE_DO_MMIO;
1750}
1751
1752#ifndef CONFIG_KVM_MIPS_VZ
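/*
 * kvm_mips_guest_cache_op() - Safely perform a cache operation on a guest VA.
 *
 * Attempt @fn on @addr with GVA races unlocked, handling any resulting GVA
 * fault and retrying. Bad virtual or physical addresses fail the emulation,
 * while guest TLB miss/invalid conditions deliver the corresponding TLB
 * exception to the guest and return EMULATE_EXCEPT so the guest resumes.
 */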
1753static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
1754                                                     unsigned long curr_pc,
1755                                                     unsigned long addr,
1756                                                     struct kvm_run *run,
1757                                                     struct kvm_vcpu *vcpu,
1758                                                     u32 cause)
1759{
1760        int err;
1761
1762        for (;;) {
1763                /* Carefully attempt the cache operation */
1764                kvm_trap_emul_gva_lockless_begin(vcpu);
1765                err = fn(addr);
1766                kvm_trap_emul_gva_lockless_end(vcpu);
1767
1768                if (likely(!err))
1769                        return EMULATE_DONE;
1770
1771                /*
1772                 * Try to handle the fault and retry, maybe we just raced with a
1773                 * GVA invalidation.
1774                 */
1775                switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
1776                case KVM_MIPS_GVA:
1777                case KVM_MIPS_GPA:
1778                        /* bad virtual or physical address */
1779                        return EMULATE_FAIL;
1780                case KVM_MIPS_TLB:
1781                        /* no matching guest TLB */
1782                        vcpu->arch.host_cp0_badvaddr = addr;
1783                        vcpu->arch.pc = curr_pc;
1784                        kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
1785                        return EMULATE_EXCEPT;
1786                case KVM_MIPS_TLBINV:
1787                        /* invalid matching guest TLB */
1788                        vcpu->arch.host_cp0_badvaddr = addr;
1789                        vcpu->arch.pc = curr_pc;
1790                        kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
1791                        return EMULATE_EXCEPT;
1792                default:
1793                        break;
1794                }
1795        }
1796}
1797
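/*
 * kvm_mips_emulate_cache() - Emulate a guest CACHE instruction.
 *
 * Index Writeback Invalidate ops are handled by flushing the whole affected
 * host cache; the hit D-cache/I-cache ops used by Linux are performed on the
 * guest's behalf via kvm_mips_guest_cache_op(). Other cache ops fail the
 * emulation, and the PC is rolled back on failure.
 */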
1798enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
1799                                             u32 *opc, u32 cause,
1800                                             struct kvm_run *run,
1801                                             struct kvm_vcpu *vcpu)
1802{
1803        enum emulation_result er = EMULATE_DONE;
1804        u32 cache, op_inst, op, base;
1805        s16 offset;
1806        struct kvm_vcpu_arch *arch = &vcpu->arch;
1807        unsigned long va;
1808        unsigned long curr_pc;
1809
1810        /*
1811         * Update PC and hold onto current PC in case there is
1812         * an error and we want to roll back the PC.
1813         */
1814        curr_pc = vcpu->arch.pc;
1815        er = update_pc(vcpu, cause);
1816        if (er == EMULATE_FAIL)
1817                return er;
1818
1819        base = inst.i_format.rs;
1820        op_inst = inst.i_format.rt;
1821        if (cpu_has_mips_r6)
1822                offset = inst.spec3_format.simmediate;
1823        else
1824                offset = inst.i_format.simmediate;
1825        cache = op_inst & CacheOp_Cache;
1826        op = op_inst & CacheOp_Op;
1827
1828        va = arch->gprs[base] + offset;
1829
1830        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1831                  cache, op, base, arch->gprs[base], offset);
1832
1833        /*
1834         * Linux issues Index Writeback Invalidate ops on startup to
1835         * invalidate the caches entirely by stepping through all the
1836         * ways/indexes, so just flush the whole affected cache instead.
1837         */
1838        if (op == Index_Writeback_Inv) {
1839                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1840                          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1841                          arch->gprs[base], offset);
1842
1843                if (cache == Cache_D) {
1844#ifdef CONFIG_CPU_R4K_CACHE_TLB
1845                        r4k_blast_dcache();
1846#else
1847                        switch (boot_cpu_type()) {
1848                        case CPU_CAVIUM_OCTEON3:
1849                                /* locally flush icache */
1850                                local_flush_icache_range(0, 0);
1851                                break;
1852                        default:
1853                                __flush_cache_all();
1854                                break;
1855                        }
1856#endif
1857                } else if (cache == Cache_I) {
1858#ifdef CONFIG_CPU_R4K_CACHE_TLB
1859                        r4k_blast_icache();
1860#else
1861                        switch (boot_cpu_type()) {
1862                        case CPU_CAVIUM_OCTEON3:
1863                                /* locally flush icache */
1864                                local_flush_icache_range(0, 0);
1865                                break;
1866                        default:
1867                                flush_icache_all();
1868                                break;
1869                        }
1870#endif
1871                } else {
1872                        kvm_err("%s: unsupported CACHE INDEX operation\n",
1873                                __func__);
1874                        return EMULATE_FAIL;
1875                }
1876
1877#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1878                kvm_mips_trans_cache_index(inst, opc, vcpu);
1879#endif
1880                goto done;
1881        }
1882
1883        /* XXXKYMA: Only the subset of cache ops used by Linux is supported */
1884        if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
1885                /*
1886                 * Perform the dcache part of icache synchronisation on the
1887                 * guest's behalf.
1888                 */
1889                er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
1890                                             curr_pc, va, run, vcpu, cause);
1891                if (er != EMULATE_DONE)
1892                        goto done;
1893#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1894                /*
1895                 * Replace the CACHE instruction with a SYNCI; not quite the
1896                 * same, but it avoids the trap.
1897                 */
1898                kvm_mips_trans_cache_va(inst, opc, vcpu);
1899#endif
1900        } else if (op_inst == Hit_Invalidate_I) {
1901                /* Perform the icache synchronisation on the guest's behalf */
1902                er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
1903                                             curr_pc, va, run, vcpu, cause);
1904                if (er != EMULATE_DONE)
1905                        goto done;
1906                er = kvm_mips_guest_cache_op(protected_flush_icache_line,
1907                                             curr_pc, va, run, vcpu, cause);
1908                if (er != EMULATE_DONE)
1909                        goto done;
1910
1911#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1912                /* Replace the CACHE instruction, with a SYNCI */
1913                kvm_mips_trans_cache_va(inst, opc, vcpu);
1914#endif
1915        } else {
1916                kvm_err("Unsupported CACHE op (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
1917                        cache, op, base, arch->gprs[base], offset);
1918                er = EMULATE_FAIL;
1919        }
1920
1921done:
1922        /* Roll back the PC only if emulation was unsuccessful */
1923        if (er == EMULATE_FAIL)
1924                vcpu->arch.pc = curr_pc;
1925        /* Guest exception needs guest to resume */
1926        if (er == EMULATE_EXCEPT)
1927                er = EMULATE_DONE;
1928
1929        return er;
1930}
1931
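/*
 * kvm_mips_emulate_inst() - Fetch and emulate a trapping guest instruction.
 *
 * Fetch the faulting instruction (following the branch delay slot when
 * CP0_Cause.BD is set) and dispatch CP0 and CACHE instructions to their
 * emulation routines; anything else fails the emulation.
 */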
1932enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
1933                                            struct kvm_run *run,
1934                                            struct kvm_vcpu *vcpu)
1935{
1936        union mips_instruction inst;
1937        enum emulation_result er = EMULATE_DONE;
1938        int err;
1939
1940        /* Fetch the instruction. */
1941        if (cause & CAUSEF_BD)
1942                opc += 1;
1943        err = kvm_get_badinstr(opc, vcpu, &inst.word);
1944        if (err)
1945                return EMULATE_FAIL;
1946
1947        switch (inst.r_format.opcode) {
1948        case cop0_op:
1949                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1950                break;
1951
1952#ifndef CONFIG_CPU_MIPSR6
1953        case cache_op:
1954                ++vcpu->stat.cache_exits;
1955                trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1956                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1957                break;
1958#else
1959        case spec3_op:
1960                switch (inst.spec3_format.func) {
1961                case cache6_op:
1962                        ++vcpu->stat.cache_exits;
1963                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1964                        er = kvm_mips_emulate_cache(inst, opc, cause, run,
1965                                                    vcpu);
1966                        break;
1967                default:
1968                        goto unknown;
1969                }
1970                break;
1971unknown:
1972#endif
1973
1974        default:
1975                kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1976                        inst.word);
1977                kvm_arch_vcpu_dump_regs(vcpu);
1978                er = EMULATE_FAIL;
1979                break;
1980        }
1981
1982        return er;
1983}
1984#endif /* CONFIG_KVM_MIPS_VZ */
1985
1986/**
1987 * kvm_mips_guest_exception_base() - Find guest exception vector base address.
1988 *
1989 * Returns:     The base address of the current guest exception vector, taking
1990 *              both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
1991 */
1992long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
1993{
1994        struct mips_coproc *cop0 = vcpu->arch.cop0;
1995
1996        if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
1997                return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
1998        else
1999                return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
2000}
2001
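/*
 * kvm_mips_emulate_syscall() - Deliver a SYSCALL exception to the guest.
 *
 * With Guest.CP0_Status.EXL clear, save the PC to EPC, set EXL, mirror the
 * branch delay state into Cause.BD, set Cause.ExcCode and point the PC at
 * the guest exception vector; the helpers below follow the same basic
 * sequence. Delivery with EXL already set fails the emulation.
 */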
2002enum emulation_result kvm_mips_emulate_syscall(u32 cause,
2003                                               u32 *opc,
2004                                               struct kvm_run *run,
2005                                               struct kvm_vcpu *vcpu)
2006{
2007        struct mips_coproc *cop0 = vcpu->arch.cop0;
2008        struct kvm_vcpu_arch *arch = &vcpu->arch;
2009        enum emulation_result er = EMULATE_DONE;
2010
2011        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2012                /* save old pc */
2013                kvm_write_c0_guest_epc(cop0, arch->pc);
2014                kvm_set_c0_guest_status(cop0, ST0_EXL);
2015
2016                if (cause & CAUSEF_BD)
2017                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2018                else
2019                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2020
2021                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
2022
2023                kvm_change_c0_guest_cause(cop0, (0xff),
2024                                          (EXCCODE_SYS << CAUSEB_EXCCODE));
2025
2026                /* Set PC to the exception entry point */
2027                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2028
2029        } else {
2030                kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
2031                er = EMULATE_FAIL;
2032        }
2033
2034        return er;
2035}
2036
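/*
 * kvm_mips_emulate_tlbmiss_ld() - Deliver a TLB refill exception (TLBL) for a
 * guest load. With EXL clear the special refill vector (base + 0x0) is used,
 * otherwise the general vector (base + 0x180). BadVAddr and EntryHi are set
 * up for the guest's refill handler.
 */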
2037enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
2038                                                  u32 *opc,
2039                                                  struct kvm_run *run,
2040                                                  struct kvm_vcpu *vcpu)
2041{
2042        struct mips_coproc *cop0 = vcpu->arch.cop0;
2043        struct kvm_vcpu_arch *arch = &vcpu->arch;
2044        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2045                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2046
2047        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2048                /* save old pc */
2049                kvm_write_c0_guest_epc(cop0, arch->pc);
2050                kvm_set_c0_guest_status(cop0, ST0_EXL);
2051
2052                if (cause & CAUSEF_BD)
2053                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2054                else
2055                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2056
2057                kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
2058                          arch->pc);
2059
2060                /* set pc to the exception entry point */
2061                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2062
2063        } else {
2064                kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
2065                          arch->pc);
2066
2067                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2068        }
2069
2070        kvm_change_c0_guest_cause(cop0, (0xff),
2071                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));
2072
2073        /* setup badvaddr, context and entryhi registers for the guest */
2074        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2075        /* XXXKYMA: is the context register used by linux??? */
2076        kvm_write_c0_guest_entryhi(cop0, entryhi);
2077
2078        return EMULATE_DONE;
2079}
2080
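/*
 * kvm_mips_emulate_tlbinv_ld() - Deliver a TLB invalid exception (TLBL) for a
 * guest load; unlike a refill, this always uses the general exception vector.
 */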
2081enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
2082                                                 u32 *opc,
2083                                                 struct kvm_run *run,
2084                                                 struct kvm_vcpu *vcpu)
2085{
2086        struct mips_coproc *cop0 = vcpu->arch.cop0;
2087        struct kvm_vcpu_arch *arch = &vcpu->arch;
2088        unsigned long entryhi =
2089                (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2090                (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2091
2092        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2093                /* save old pc */
2094                kvm_write_c0_guest_epc(cop0, arch->pc);
2095                kvm_set_c0_guest_status(cop0, ST0_EXL);
2096
2097                if (cause & CAUSEF_BD)
2098                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2099                else
2100                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2101
2102                kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
2103                          arch->pc);
2104        } else {
2105                kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
2106                          arch->pc);
2107        }
2108
2109        /* set pc to the exception entry point */
2110        arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2111
2112        kvm_change_c0_guest_cause(cop0, (0xff),
2113                                  (EXCCODE_TLBL << CAUSEB_EXCCODE));
2114
2115        /* setup badvaddr, context and entryhi registers for the guest */
2116        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2117        /* XXXKYMA: is the context register used by linux??? */
2118        kvm_write_c0_guest_entryhi(cop0, entryhi);
2119
2120        return EMULATE_DONE;
2121}
2122
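/*
 * kvm_mips_emulate_tlbmiss_st() - As kvm_mips_emulate_tlbmiss_ld(), but for a
 * guest store (ExcCode TLBS).
 */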
2123enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
2124                                                  u32 *opc,
2125                                                  struct kvm_run *run,
2126                                                  struct kvm_vcpu *vcpu)
2127{
2128        struct mips_coproc *cop0 = vcpu->arch.cop0;
2129        struct kvm_vcpu_arch *arch = &vcpu->arch;
2130        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2131                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2132
2133        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2134                /* save old pc */
2135                kvm_write_c0_guest_epc(cop0, arch->pc);
2136                kvm_set_c0_guest_status(cop0, ST0_EXL);
2137
2138                if (cause & CAUSEF_BD)
2139                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2140                else
2141                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2142
2143                kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2144                          arch->pc);
2145
2146                /* Set PC to the exception entry point */
2147                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
2148        } else {
2149                kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2150                          arch->pc);
2151                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2152        }
2153
2154        kvm_change_c0_guest_cause(cop0, (0xff),
2155                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));
2156
2157        /* setup badvaddr, context and entryhi registers for the guest */
2158        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2159        /* XXXKYMA: is the context register used by linux??? */
2160        kvm_write_c0_guest_entryhi(cop0, entryhi);
2161
2162        return EMULATE_DONE;
2163}
2164
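/*
 * kvm_mips_emulate_tlbinv_st() - As kvm_mips_emulate_tlbinv_ld(), but for a
 * guest store (ExcCode TLBS).
 */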
2165enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
2166                                                 u32 *opc,
2167                                                 struct kvm_run *run,
2168                                                 struct kvm_vcpu *vcpu)
2169{
2170        struct mips_coproc *cop0 = vcpu->arch.cop0;
2171        struct kvm_vcpu_arch *arch = &vcpu->arch;
2172        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2173                (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2174
2175        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2176                /* save old pc */
2177                kvm_write_c0_guest_epc(cop0, arch->pc);
2178                kvm_set_c0_guest_status(cop0, ST0_EXL);
2179
2180                if (cause & CAUSEF_BD)
2181                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2182                else
2183                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2184
2185                kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
2186                          arch->pc);
2187        } else {
2188                kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
2189                          arch->pc);
2190        }
2191
2192        /* Set PC to the exception entry point */
2193        arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2194
2195        kvm_change_c0_guest_cause(cop0, (0xff),
2196                                  (EXCCODE_TLBS << CAUSEB_EXCCODE));
2197
2198        /* setup badvaddr, context and entryhi registers for the guest */
2199        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2200        /* XXXKYMA: is the context register used by linux??? */
2201        kvm_write_c0_guest_entryhi(cop0, entryhi);
2202
2203        return EMULATE_DONE;
2204}
2205
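/*
 * kvm_mips_emulate_tlbmod() - Deliver a TLB modified exception to the guest,
 * as raised by a store to a valid but not dirty (read-only) page.
 */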
2206enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
2207                                              u32 *opc,
2208                                              struct kvm_run *run,
2209                                              struct kvm_vcpu *vcpu)
2210{
2211        struct mips_coproc *cop0 = vcpu->arch.cop0;
2212        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
2213                        (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
2214        struct kvm_vcpu_arch *arch = &vcpu->arch;
2215
2216        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2217                /* save old pc */
2218                kvm_write_c0_guest_epc(cop0, arch->pc);
2219                kvm_set_c0_guest_status(cop0, ST0_EXL);
2220
2221                if (cause & CAUSEF_BD)
2222                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2223                else
2224                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2225
2226                kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
2227                          arch->pc);
2228        } else {
2229                kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
2230                          arch->pc);
2231        }
2232
2233        arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2234
2235        kvm_change_c0_guest_cause(cop0, (0xff),
2236                                  (EXCCODE_MOD << CAUSEB_EXCCODE));
2237
2238        /* setup badvaddr, context and entryhi registers for the guest */
2239        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2240        /* XXXKYMA: is the context register used by linux??? */
2241        kvm_write_c0_guest_entryhi(cop0, entryhi);
2242
2243        return EMULATE_DONE;
2244}
2245
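/*
 * kvm_mips_emulate_fpu_exc() - Deliver a Coprocessor Unusable exception for
 * coprocessor 1 to the guest, with Cause.CE set to 1.
 */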
2246enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
2247                                               u32 *opc,
2248                                               struct kvm_run *run,
2249                                               struct kvm_vcpu *vcpu)
2250{
2251        struct mips_coproc *cop0 = vcpu->arch.cop0;
2252        struct kvm_vcpu_arch *arch = &vcpu->arch;
2253
2254        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2255                /* save old pc */
2256                kvm_write_c0_guest_epc(cop0, arch->pc);
2257                kvm_set_c0_guest_status(cop0, ST0_EXL);
2258
2259                if (cause & CAUSEF_BD)
2260                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2261                else
2262                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2263
2264        }
2265
2266        arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2267
2268        kvm_change_c0_guest_cause(cop0, (0xff),
2269                                  (EXCCODE_CPU << CAUSEB_EXCCODE));
2270        kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2271
2272        return EMULATE_DONE;
2273}
2274
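/* Deliver a Reserved Instruction exception (ExcCode RI) to the guest. */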
2275enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
2276                                              u32 *opc,
2277                                              struct kvm_run *run,
2278                                              struct kvm_vcpu *vcpu)
2279{
2280        struct mips_coproc *cop0 = vcpu->arch.cop0;
2281        struct kvm_vcpu_arch *arch = &vcpu->arch;
2282        enum emulation_result er = EMULATE_DONE;
2283
2284        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2285                /* save old pc */
2286                kvm_write_c0_guest_epc(cop0, arch->pc);
2287                kvm_set_c0_guest_status(cop0, ST0_EXL);
2288
2289                if (cause & CAUSEF_BD)
2290                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2291                else
2292                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2293
2294                kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2295
2296                kvm_change_c0_guest_cause(cop0, (0xff),
2297                                          (EXCCODE_RI << CAUSEB_EXCCODE));
2298
2299                /* Set PC to the exception entry point */
2300                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2301
2302        } else {
2303                kvm_err("Trying to deliver RI when EXL is already set\n");
2304                er = EMULATE_FAIL;
2305        }
2306
2307        return er;
2308}
2309
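/* Deliver a Breakpoint exception (ExcCode BP) to the guest. */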
2310enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
2311                                              u32 *opc,
2312                                              struct kvm_run *run,
2313                                              struct kvm_vcpu *vcpu)
2314{
2315        struct mips_coproc *cop0 = vcpu->arch.cop0;
2316        struct kvm_vcpu_arch *arch = &vcpu->arch;
2317        enum emulation_result er = EMULATE_DONE;
2318
2319        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2320                /* save old pc */
2321                kvm_write_c0_guest_epc(cop0, arch->pc);
2322                kvm_set_c0_guest_status(cop0, ST0_EXL);
2323
2324                if (cause & CAUSEF_BD)
2325                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2326                else
2327                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2328
2329                kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2330
2331                kvm_change_c0_guest_cause(cop0, (0xff),
2332                                          (EXCCODE_BP << CAUSEB_EXCCODE));
2333
2334                /* Set PC to the exception entry point */
2335                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2336
2337        } else {
2338                kvm_err("Trying to deliver BP when EXL is already set\n");
2339                er = EMULATE_FAIL;
2340        }
2341
2342        return er;
2343}
2344
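/* Deliver a Trap exception (ExcCode TR) to the guest. */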
2345enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
2346                                                u32 *opc,
2347                                                struct kvm_run *run,
2348                                                struct kvm_vcpu *vcpu)
2349{
2350        struct mips_coproc *cop0 = vcpu->arch.cop0;
2351        struct kvm_vcpu_arch *arch = &vcpu->arch;
2352        enum emulation_result er = EMULATE_DONE;
2353
2354        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2355                /* save old pc */
2356                kvm_write_c0_guest_epc(cop0, arch->pc);
2357                kvm_set_c0_guest_status(cop0, ST0_EXL);
2358
2359                if (cause & CAUSEF_BD)
2360                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2361                else
2362                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2363
2364                kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2365
2366                kvm_change_c0_guest_cause(cop0, (0xff),
2367                                          (EXCCODE_TR << CAUSEB_EXCCODE));
2368
2369                /* Set PC to the exception entry point */
2370                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2371
2372        } else {
2373                kvm_err("Trying to deliver TRAP when EXL is already set\n");
2374                er = EMULATE_FAIL;
2375        }
2376
2377        return er;
2378}
2379
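/* Deliver an MSA Floating Point exception (ExcCode MSAFPE) to the guest. */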
2380enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
2381                                                  u32 *opc,
2382                                                  struct kvm_run *run,
2383                                                  struct kvm_vcpu *vcpu)
2384{
2385        struct mips_coproc *cop0 = vcpu->arch.cop0;
2386        struct kvm_vcpu_arch *arch = &vcpu->arch;
2387        enum emulation_result er = EMULATE_DONE;
2388
2389        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2390                /* save old pc */
2391                kvm_write_c0_guest_epc(cop0, arch->pc);
2392                kvm_set_c0_guest_status(cop0, ST0_EXL);
2393
2394                if (cause & CAUSEF_BD)
2395                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2396                else
2397                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2398
2399                kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
2400
2401                kvm_change_c0_guest_cause(cop0, (0xff),
2402                                          (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
2403
2404                /* Set PC to the exception entry point */
2405                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2406
2407        } else {
2408                kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
2409                er = EMULATE_FAIL;
2410        }
2411
2412        return er;
2413}
2414
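/* Deliver a Floating Point exception (ExcCode FPE) to the guest. */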
2415enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
2416                                               u32 *opc,
2417                                               struct kvm_run *run,
2418                                               struct kvm_vcpu *vcpu)
2419{
2420        struct mips_coproc *cop0 = vcpu->arch.cop0;
2421        struct kvm_vcpu_arch *arch = &vcpu->arch;
2422        enum emulation_result er = EMULATE_DONE;
2423
2424        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2425                /* save old pc */
2426                kvm_write_c0_guest_epc(cop0, arch->pc);
2427                kvm_set_c0_guest_status(cop0, ST0_EXL);
2428
2429                if (cause & CAUSEF_BD)
2430                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2431                else
2432                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2433
2434                kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
2435
2436                kvm_change_c0_guest_cause(cop0, (0xff),
2437                                          (EXCCODE_FPE << CAUSEB_EXCCODE));
2438
2439                /* Set PC to the exception entry point */
2440                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2441
2442        } else {
2443                kvm_err("Trying to deliver FPE when EXL is already set\n");
2444                er = EMULATE_FAIL;
2445        }
2446
2447        return er;
2448}
2449
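/* Deliver an MSA Disabled exception (ExcCode MSADIS) to the guest. */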
2450enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
2451                                                  u32 *opc,
2452                                                  struct kvm_run *run,
2453                                                  struct kvm_vcpu *vcpu)
2454{
2455        struct mips_coproc *cop0 = vcpu->arch.cop0;
2456        struct kvm_vcpu_arch *arch = &vcpu->arch;
2457        enum emulation_result er = EMULATE_DONE;
2458
2459        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2460                /* save old pc */
2461                kvm_write_c0_guest_epc(cop0, arch->pc);
2462                kvm_set_c0_guest_status(cop0, ST0_EXL);
2463
2464                if (cause & CAUSEF_BD)
2465                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2466                else
2467                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2468
2469                kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
2470
2471                kvm_change_c0_guest_cause(cop0, (0xff),
2472                                          (EXCCODE_MSADIS << CAUSEB_EXCCODE));
2473
2474                /* Set PC to the exception entry point */
2475                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2476
2477        } else {
2478                kvm_err("Trying to deliver MSADIS when EXL is already set\n");
2479                er = EMULATE_FAIL;
2480        }
2481
2482        return er;
2483}
2484
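/*
 * kvm_mips_handle_ri() - Handle a Reserved Instruction exception, emulating
 * the RDHWR registers CPUNum, SYNCI_Step, CC, CCRes and ULR (subject to the
 * guest HWREna when in user mode). Anything else rolls the PC back and
 * passes an RI exception on to the guest.
 */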
2485enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
2486                                         struct kvm_run *run,
2487                                         struct kvm_vcpu *vcpu)
2488{
2489        struct mips_coproc *cop0 = vcpu->arch.cop0;
2490        struct kvm_vcpu_arch *arch = &vcpu->arch;
2491        enum emulation_result er = EMULATE_DONE;
2492        unsigned long curr_pc;
2493        union mips_instruction inst;
2494        int err;
2495
2496        /*
2497         * Update PC and hold onto current PC in case there is
2498         * an error and we want to roll back the PC.
2499         */
2500        curr_pc = vcpu->arch.pc;
2501        er = update_pc(vcpu, cause);
2502        if (er == EMULATE_FAIL)
2503                return er;
2504
2505        /* Fetch the instruction. */
2506        if (cause & CAUSEF_BD)
2507                opc += 1;
2508        err = kvm_get_badinstr(opc, vcpu, &inst.word);
2509        if (err) {
2510                kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
2511                return EMULATE_FAIL;
2512        }
2513
2514        if (inst.r_format.opcode == spec3_op &&
2515            inst.r_format.func == rdhwr_op &&
2516            inst.r_format.rs == 0 &&
2517            (inst.r_format.re >> 3) == 0) {
2518                int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2519                int rd = inst.r_format.rd;
2520                int rt = inst.r_format.rt;
2521                int sel = inst.r_format.re & 0x7;
2522
2523                /* If usermode, check RDHWR rd is allowed by guest HWREna */
2524                if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2525                        kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2526                                  rd, opc);
2527                        goto emulate_ri;
2528                }
2529                switch (rd) {
2530                case MIPS_HWR_CPUNUM:           /* CPU number */
2531                        arch->gprs[rt] = vcpu->vcpu_id;
2532                        break;
2533                case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
2534                        arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2535                                             current_cpu_data.icache.linesz);
2536                        break;
2537                case MIPS_HWR_CC:               /* Read count register */
2538                        arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
2539                        break;
2540                case MIPS_HWR_CCRES:            /* Count register resolution */
2541                        switch (current_cpu_data.cputype) {
2542                        case CPU_20KC:
2543                        case CPU_25KF:
2544                                arch->gprs[rt] = 1;
2545                                break;
2546                        default:
2547                                arch->gprs[rt] = 2;
2548                        }
2549                        break;
2550                case MIPS_HWR_ULR:              /* Read UserLocal register */
2551                        arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2552                        break;
2553
2554                default:
2555                        kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2556                        goto emulate_ri;
2557                }
2558
2559                trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
2560                              vcpu->arch.gprs[rt]);
2561        } else {
2562                kvm_debug("Emulate RI not supported @ %p: %#x\n",
2563                          opc, inst.word);
2564                goto emulate_ri;
2565        }
2566
2567        return EMULATE_DONE;
2568
2569emulate_ri:
2570        /*
2571         * Roll back the PC (if in a branch delay slot the PC already points
2572         * to the branch target), and pass the RI exception on to the guest OS.
2573         */
2574        vcpu->arch.pc = curr_pc;
2575        return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2576}
2577
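/*
 * kvm_mips_complete_mmio_load() - Finish an emulated guest MMIO load.
 *
 * Called once userspace has completed the MMIO access: restore the saved
 * resume PC and write the data, sign- or zero-extended according to
 * vcpu->mmio_needed, into the GPR recorded by kvm_mips_emulate_load().
 */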
2578enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2579                                                  struct kvm_run *run)
2580{
2581        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2582        enum emulation_result er = EMULATE_DONE;
2583
2584        if (run->mmio.len > sizeof(*gpr)) {
2585                kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2586                er = EMULATE_FAIL;
2587                goto done;
2588        }
2589
2590        /* Restore saved resume PC */
2591        vcpu->arch.pc = vcpu->arch.io_pc;
2592
2593        switch (run->mmio.len) {
2594        case 8:
2595                *gpr = *(s64 *)run->mmio.data;
2596                break;
2597
2598        case 4:
2599                if (vcpu->mmio_needed == 2)
2600                        *gpr = *(s32 *)run->mmio.data;
2601                else
2602                        *gpr = *(u32 *)run->mmio.data;
2603                break;
2604
2605        case 2:
2606                if (vcpu->mmio_needed == 2)
2607                        *gpr = *(s16 *)run->mmio.data;
2608                else
2609                        *gpr = *(u16 *)run->mmio.data;
2610                break;
2611
2612        case 1:
2613                if (vcpu->mmio_needed == 2)
2614                        *gpr = *(s8 *)run->mmio.data;
2615                else
2616                        *gpr = *(u8 *)run->mmio.data;
2617                break;
2618        }
2619
2620done:
2621        return er;
2622}
2623
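/*
 * kvm_mips_emulate_exc() - Deliver an exception with the ExcCode taken from
 * @cause to the guest, also setting up Guest.CP0_BadVAddr. Used to reflect
 * privilege violations detected by kvm_mips_check_privilege() below.
 */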
2624static enum emulation_result kvm_mips_emulate_exc(u32 cause,
2625                                                  u32 *opc,
2626                                                  struct kvm_run *run,
2627                                                  struct kvm_vcpu *vcpu)
2628{
2629        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2630        struct mips_coproc *cop0 = vcpu->arch.cop0;
2631        struct kvm_vcpu_arch *arch = &vcpu->arch;
2632        enum emulation_result er = EMULATE_DONE;
2633
2634        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2635                /* save old pc */
2636                kvm_write_c0_guest_epc(cop0, arch->pc);
2637                kvm_set_c0_guest_status(cop0, ST0_EXL);
2638
2639                if (cause & CAUSEF_BD)
2640                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2641                else
2642                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2643
2644                kvm_change_c0_guest_cause(cop0, (0xff),
2645                                          (exccode << CAUSEB_EXCCODE));
2646
2647                /* Set PC to the exception entry point */
2648                arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
2649                kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2650
2651                kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2652                          exccode, kvm_read_c0_guest_epc(cop0),
2653                          kvm_read_c0_guest_badvaddr(cop0));
2654        } else {
2655                kvm_err("Trying to deliver EXC when EXL is already set\n");
2656                er = EMULATE_FAIL;
2657        }
2658
2659        return er;
2660}
2661
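/*
 * kvm_mips_check_privilege() - Check a guest user-mode exception for
 * privilege violations. TLB faults on guest kernel addresses become address
 * errors, address errors on the commpage become TLB faults, and any
 * violation is reflected back to the guest via kvm_mips_emulate_exc().
 */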
2662enum emulation_result kvm_mips_check_privilege(u32 cause,
2663                                               u32 *opc,
2664                                               struct kvm_run *run,
2665                                               struct kvm_vcpu *vcpu)
2666{
2667        enum emulation_result er = EMULATE_DONE;
2668        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2669        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2670
2671        int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2672
2673        if (usermode) {
2674                switch (exccode) {
2675                case EXCCODE_INT:
2676                case EXCCODE_SYS:
2677                case EXCCODE_BP:
2678                case EXCCODE_RI:
2679                case EXCCODE_TR:
2680                case EXCCODE_MSAFPE:
2681                case EXCCODE_FPE:
2682                case EXCCODE_MSADIS:
2683                        break;
2684
2685                case EXCCODE_CPU:
2686                        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2687                                er = EMULATE_PRIV_FAIL;
2688                        break;
2689
2690                case EXCCODE_MOD:
2691                        break;
2692
2693                case EXCCODE_TLBL:
2694                        /*
2695                         * If we are accessing guest kernel space, send an
2696                         * address error exception to the guest
2697                         */
2698                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2699                                kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2700                                          badvaddr);
2701                                cause &= ~0xff;
2702                                cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
2703                                er = EMULATE_PRIV_FAIL;
2704                        }
2705                        break;
2706
2707                case EXCCODE_TLBS:
2708                        /*
2709                         * If we are accessing guest kernel space, send an
2710                         * address error exception to the guest
2711                         */
2712                        if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2713                                kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2714                                          badvaddr);
2715                                cause &= ~0xff;
2716                                cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
2717                                er = EMULATE_PRIV_FAIL;
2718                        }
2719                        break;
2720
2721                case EXCCODE_ADES:
2722                        kvm_debug("%s: address error ST @ %#lx\n", __func__,
2723                                  badvaddr);
2724                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2725                                cause &= ~0xff;
2726                                cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
2727                        }
2728                        er = EMULATE_PRIV_FAIL;
2729                        break;
2730                case EXCCODE_ADEL:
2731                        kvm_debug("%s: address error LD @ %#lx\n", __func__,
2732                                  badvaddr);
2733                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2734                                cause &= ~0xff;
2735                                cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
2736                        }
2737                        er = EMULATE_PRIV_FAIL;
2738                        break;
2739                default:
2740                        er = EMULATE_PRIV_FAIL;
2741                        break;
2742                }
2743        }
2744
2745        if (er == EMULATE_PRIV_FAIL)
2746                kvm_mips_emulate_exc(cause, opc, run, vcpu);
2747
2748        return er;
2749}
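
/*
 * The Cause rewrites above (TLBL -> ADEL, TLBS -> ADES, ADES -> TLBS,
 * ADEL -> TLBL) all follow one pattern: clear the low Cause bits, which
 * contain the 5-bit ExcCode field at CAUSEB_EXCCODE, then OR in the
 * replacement code. A minimal sketch of that pattern as a standalone
 * helper; kvm_mips_change_exccode() is hypothetical and not part of
 * this file:
 */
static inline u32 kvm_mips_change_exccode(u32 cause, u32 exccode)
{
        /* Clear Cause[7:0], which covers ExcCode (Cause[6:2]). */
        cause &= ~0xff;
        /* Install the replacement exception code. */
        cause |= exccode << CAUSEB_EXCCODE;
        return cause;
}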
2750
2751/*
2752 * User Address (UA) fault. This can happen if:
2753 * (1) the TLB entry is not present/valid in both the Guest and shadow host
2754 *     TLBs; in this case we pass the fault on to the guest kernel to handle.
2755 * (2) the TLB entry is present in the Guest TLB but not in the shadow host
2756 *     TLB; in this case we inject it from the Guest TLB into the shadow host TLB.
2757 */
2758enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
2759                                              u32 *opc,
2760                                              struct kvm_run *run,
2761                                              struct kvm_vcpu *vcpu,
2762                                              bool write_fault)
2763{
2764        enum emulation_result er = EMULATE_DONE;
2765        u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2766        unsigned long va = vcpu->arch.host_cp0_badvaddr;
2767        int index;
2768
2769        kvm_debug("%s: badvaddr: %#lx\n",
2770                  __func__, va);
2771
2772        /*
2773         * KVM would not have got the exception if this entry was valid in the
2774         * shadow host TLB. Check the Guest TLB, if the entry is not there then
2775         * send the guest an exception. The guest exc handler should then inject
2776         * an entry into the guest TLB.
2777         */
2778        index = kvm_mips_guest_tlb_lookup(vcpu,
2779                      (va & VPN2_MASK) |
2780                      (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
2781                       KVM_ENTRYHI_ASID));
2782        if (index < 0) {
2783                if (exccode == EXCCODE_TLBL) {
2784                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2785                } else if (exccode == EXCCODE_TLBS) {
2786                        er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2787                } else {
2788                        kvm_err("%s: invalid exc code: %d\n", __func__,
2789                                exccode);
2790                        er = EMULATE_FAIL;
2791                }
2792        } else {
2793                struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2794
2795                /*
2796                 * Check if the entry is valid; if not, set up a TLB
2797                 * invalid exception for the guest
2798                 */
2799                if (!TLB_IS_VALID(*tlb, va)) {
2800                        if (exccode == EXCCODE_TLBL) {
2801                                er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2802                                                                vcpu);
2803                        } else if (exccode == EXCCODE_TLBS) {
2804                                er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2805                                                                vcpu);
2806                        } else {
2807                                kvm_err("%s: invalid exc code: %d\n", __func__,
2808                                        exccode);
2809                                er = EMULATE_FAIL;
2810                        }
2811                } else {
2812                        kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2813                                  tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
2814                        /*
2815                         * OK, we have a Guest TLB entry; now inject it
2816                         * into the shadow host TLB
2817                         */
2818                        if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
2819                                                                 write_fault)) {
2820                                kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
2821                                        __func__, va, index, vcpu,
2822                                        read_c0_entryhi());
2823                                er = EMULATE_FAIL;
2824                        }
2825                }
2826        }
2827
2828        return er;
2829}
2830
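/*
 * For context, a caller on the exception dispatch path (the TLB miss
 * handlers in trap_emul.c follow roughly this shape) would act on the
 * returned emulation_result like this. A simplified sketch;
 * handle_guest_tlb_miss_sketch() is hypothetical:
 */
static int handle_guest_tlb_miss_sketch(struct kvm_vcpu *vcpu, u32 cause,
                                        u32 *opc, bool write_fault)
{
        struct kvm_run *run = vcpu->run;
        enum emulation_result er;

        er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, write_fault);
        if (er == EMULATE_DONE)
                /* Fault fixed up or reflected to the guest; re-enter it. */
                return RESUME_GUEST;

        /* Emulation failed; report an internal error to userspace. */
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}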