linux/virt/kvm/arm/aarch32.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
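/*
 * Rows are indexed by exception vector offset / 4; column 0 holds the A32
 * (ARM) return offset, column 1 the T32 (Thumb) one.
 */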
static const u8 return_offsets[8][2] = {
        [0] = { 0, 0 },         /* Reset, unused */
        [1] = { 4, 2 },         /* Undefined */
        [2] = { 0, 0 },         /* SVC, unused */
        [3] = { 4, 4 },         /* Prefetch abort */
        [4] = { 8, 8 },         /* Data abort */
        [5] = { 0, 0 },         /* HVC, unused */
        [6] = { 4, 4 },         /* IRQ, unused */
        [7] = { 4, 4 },         /* FIQ, unused */
};

static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
        unsigned long cpsr;
        unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
        bool is_thumb = (new_spsr_value & PSR_AA32_T_BIT);
        u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

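        /* Enter the target mode with IRQs masked */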
        cpsr = mode | PSR_AA32_I_BIT;

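        /* SCTLR.TE (bit 30): take exceptions in Thumb state */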
        if (sctlr & (1 << 30))
                cpsr |= PSR_AA32_T_BIT;
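        /* SCTLR.EE (bit 25): value of CPSR.E on exception entry */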
        if (sctlr & (1 << 25))
                cpsr |= PSR_AA32_E_BIT;

        *vcpu_cpsr(vcpu) = cpsr;

        /* Note: These now point to the banked copies */
        vcpu_write_spsr(vcpu, new_spsr_value);
        *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

        /* Branch to exception vector */
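        /* SCTLR.V (bit 13): Hivecs, exception vectors live at 0xffff0000 */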
        if (sctlr & (1 << 13))
                vect_offset += 0xffff0000;
        else /* always have security exceptions */
                vect_offset += vcpu_cp15(vcpu, c12_VBAR);

        *vcpu_pc(vcpu) = vect_offset;
}

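/* 0x4 is the Undefined Instruction vector offset */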
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
        prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
                         unsigned long addr)
{
        u32 vect_offset;
        u32 *far, *fsr;
        bool is_lpae;

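        /* Prefetch Abort vector is at offset 0xc, Data Abort at 0x10 */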
        if (is_pabt) {
                vect_offset = 12;
                far = &vcpu_cp15(vcpu, c6_IFAR);
                fsr = &vcpu_cp15(vcpu, c5_IFSR);
        } else { /* !is_pabt: data abort */
                vect_offset = 16;
                far = &vcpu_cp15(vcpu, c6_DFAR);
                fsr = &vcpu_cp15(vcpu, c5_DFSR);
        }

        prepare_fault32(vcpu, PSR_AA32_MODE_ABT | PSR_AA32_A_BIT, vect_offset);

        *far = addr;

        /* Give the guest an IMPLEMENTATION DEFINED exception */
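        /* TTBCR.EAE (bit 31) selects the long-descriptor (LPAE) format */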
        is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
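        /* FSR bit 9 flags the long-descriptor (LPAE) FSR encoding */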
        if (is_lpae)
                *fsr = 1 << 9 | 0x34;
        else
                *fsr = 0x14;
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
        inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
        inject_abt32(vcpu, true, addr);
}