linux/arch/powerpc/kvm/book3s_hv_ras.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2012 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <asm/opal.h>
#include <asm/mce.h>

/* SRR1 bits for machine check on POWER7 */
#define SRR1_MC_LDSTERR		(1ul << (63-42))
#define SRR1_MC_IFETCH_SH	(63-45)
#define SRR1_MC_IFETCH_MASK	0x7
#define SRR1_MC_IFETCH_SLBPAR		2	/* SLB parity error */
#define SRR1_MC_IFETCH_SLBMULTI		3	/* SLB multi-hit */
#define SRR1_MC_IFETCH_SLBPARMULTI	4	/* SLB parity + multi-hit */
#define SRR1_MC_IFETCH_TLBMULTI		5	/* I-TLB multi-hit */

/* DSISR bits for machine check on POWER7 */
#define DSISR_MC_DERAT_MULTI	0x800		/* D-ERAT multi-hit */
#define DSISR_MC_TLB_MULTI	0x400		/* D-TLB multi-hit */
#define DSISR_MC_SLB_PARITY	0x100		/* SLB parity error */
#define DSISR_MC_SLB_MULTI	0x080		/* SLB multi-hit */
#define DSISR_MC_SLB_PARMULTI	0x040		/* SLB parity + multi-hit */

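/*
 * These definitions mirror the machine-check reason fields that the
 * POWER7 core reports in SRR1 (for instruction-fetch errors) and DSISR
 * (for load/store errors); kvmppc_realmode_mc_power7() below decodes
 * them to decide whether the error can be cleaned up in real mode.
 */
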
/* POWER7 SLB flush and reload */
static void reload_slb(struct kvm_vcpu *vcpu)
{
	struct slb_shadow *slb;
	unsigned long i, n;

	/*
	 * First clear out the SLB: zero entry 0 by hand, since slbia
	 * leaves entry 0 alone, then invalidate the rest with slbia.
	 */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));

	/* Do they have an SLB shadow buffer registered (via H_REGISTER_VPA)? */
	slb = vcpu->arch.slb_shadow.pinned_addr;
	if (!slb)
		return;

	/* Sanity check: don't index past the end of the pinned buffer */
	n = min_t(u32, be32_to_cpu(slb->persistent), SLB_MIN_SIZE);
	if ((void *) &slb->save_area[n] > vcpu->arch.slb_shadow.pinned_end)
		return;

	/* Load up the SLB from the shadow buffer */
	for (i = 0; i < n; ++i) {
		unsigned long rb = be64_to_cpu(slb->save_area[i].esid);
		unsigned long rs = be64_to_cpu(slb->save_area[i].vsid);

		rb = (rb & ~0xFFFul) | i;	/* insert entry number */
		asm volatile("slbmte %0,%1" : : "r" (rs), "r" (rb));
	}
}

/*
 * On POWER7, see if we can handle a machine check that occurred inside
 * the guest in real mode, without switching to the host partition.
 *
 * Returns: 0 => exit guest, 1 => deliver machine check to guest
 */
static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
	unsigned long srr1 = vcpu->arch.shregs.msr;
	struct machine_check_event mce_evt;
	long handled = 1;

	if (srr1 & SRR1_MC_LDSTERR) {
		/* error on load/store */
		unsigned long dsisr = vcpu->arch.shregs.dsisr;

		if (dsisr & (DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
			     DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI)) {
			/* flush and reload SLB; flushes D-ERAT too */
			reload_slb(vcpu);
			dsisr &= ~(DSISR_MC_SLB_PARMULTI | DSISR_MC_SLB_MULTI |
				   DSISR_MC_SLB_PARITY | DSISR_MC_DERAT_MULTI);
		}
		if (dsisr & DSISR_MC_TLB_MULTI) {
			if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
				cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
			dsisr &= ~DSISR_MC_TLB_MULTI;
		}
		/* Any other errors we don't understand? */
		if (dsisr & 0xffffffffUL)
			handled = 0;
	}

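	/*
	 * Next, decode the instruction-fetch machine-check reason field in
	 * SRR1: SLB errors are cleaned up by reloading the SLB, and an
	 * I-TLB multi-hit by flushing the TLB for this partition.
	 */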
	switch ((srr1 >> SRR1_MC_IFETCH_SH) & SRR1_MC_IFETCH_MASK) {
	case 0:		/* no instruction-fetch error */
		break;
	case SRR1_MC_IFETCH_SLBPAR:
	case SRR1_MC_IFETCH_SLBMULTI:
	case SRR1_MC_IFETCH_SLBPARMULTI:
		reload_slb(vcpu);
		break;
	case SRR1_MC_IFETCH_TLBMULTI:
		if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
		break;
	default:
		handled = 0;
	}

	/*
	 * See if we have already handled the condition in the Linux host.
	 * We assume that if the condition is recovered then the Linux host
	 * will have generated an error log event that we will pick up and
	 * log later.
	 * Don't release the MCE event now; queue it up so that we can log
	 * the MCE event info on the host console.
	 */
	if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
		goto out;

	if (mce_evt.version == MCE_V1 &&
	    (mce_evt.severity == MCE_SEV_NO_ERROR ||
	     mce_evt.disposition == MCE_DISPOSITION_RECOVERED))
		handled = 1;

out:
	/*
	 * We are now going to enter the guest, either through the machine
	 * check interrupt (for unhandled errors) or by continuing from the
	 * current HSRR0 (for handled errors). Hence queue up the event so
	 * that we can log it from the host console later.
	 */
	machine_check_queue_event();

	return handled;
}

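/*
 * Real-mode machine check entry point, called from the guest exit path
 * (in this kernel's book3s_hv_rmhandlers.S); it currently just hands
 * off to the POWER7 handler above.
 */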
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
	return kvmppc_realmode_mc_power7(vcpu);
}