linux/arch/arm64/kvm/mmio.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

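/*
 * kvm_mmio_write_buf -- store an MMIO value into a byte buffer
 *
 * Stage 'data' in a temporary of the access width and copy its bytes into
 * 'buf', so that 'buf' ends up holding the value in the host's native
 * byte order.
 */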
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
        void *datap = NULL;
        union {
                u8      byte;
                u16     hword;
                u32     word;
                u64     dword;
        } tmp;

        switch (len) {
        case 1:
                tmp.byte        = data;
                datap           = &tmp.byte;
                break;
        case 2:
                tmp.hword       = data;
                datap           = &tmp.hword;
                break;
        case 4:
                tmp.word        = data;
                datap           = &tmp.word;
                break;
        case 8:
                tmp.dword       = data;
                datap           = &tmp.dword;
                break;
        }

        memcpy(buf, datap, len);
}

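/*
 * kvm_mmio_read_buf -- load an MMIO value from a byte buffer
 *
 * Read 'len' bytes from 'buf' as a value of that width and zero-extend it
 * to an unsigned long. memcpy() is used for the wider accesses, so no
 * particular alignment of 'buf' is required.
 */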
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
        unsigned long data = 0;
        union {
                u16     hword;
                u32     word;
                u64     dword;
        } tmp;

        switch (len) {
        case 1:
                data = *(u8 *)buf;
                break;
        case 2:
                memcpy(&tmp.hword, buf, len);
                data = tmp.hword;
                break;
        case 4:
                memcpy(&tmp.word, buf, len);
                data = tmp.word;
                break;
        case 8:
                memcpy(&tmp.dword, buf, len);
                data = tmp.dword;
                break;
        }

        return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *                           or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
{
        unsigned long data;
        unsigned int len;
        int mask;

        /* Detect an already handled MMIO return */
        if (unlikely(!vcpu->mmio_needed))
                return 0;

        vcpu->mmio_needed = 0;

        if (!kvm_vcpu_dabt_iswrite(vcpu)) {
                struct kvm_run *run = vcpu->run;

                len = kvm_vcpu_dabt_get_as(vcpu);
                data = kvm_mmio_read_buf(run->mmio.data, len);

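                /*
                 * For a sign-extending load narrower than a register,
                 * replicate the sign bit into the upper bits:
                 * (data ^ mask) - mask sign-extends from bit (len * 8 - 1).
                 */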
                if (kvm_vcpu_dabt_issext(vcpu) &&
                    len < sizeof(unsigned long)) {
                        mask = 1U << ((len * 8) - 1);
                        data = (data ^ mask) - mask;
                }

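                /*
                 * A 32-bit destination register (syndrome SF bit clear)
                 * only takes the low 32 bits of the value.
                 */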
                if (!kvm_vcpu_dabt_issf(vcpu))
                        data = data & 0xffffffff;

                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                               &data);
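                /* Convert to the guest's endianness before writing the register back. */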
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
        }

        /*
         * The MMIO instruction is emulated and should not be re-executed
         * in the guest.
         */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        return 0;
}

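/*
 * io_mem_abort -- handle a guest data abort that is not backed by a memslot
 *
 * Decode the access from the fault syndrome, let in-kernel devices on
 * KVM_MMIO_BUS have a go at it, and otherwise prepare a KVM_EXIT_MMIO
 * (or KVM_EXIT_ARM_NISV) return to user space.
 *
 * Returns 1 if the access was handled in the kernel, 0 if user space has
 * to emulate it, or a negative error code.
 */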
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        struct kvm_run *run = vcpu->run;
        unsigned long data;
        unsigned long rt;
        int ret;
        bool is_write;
        int len;
        u8 data_buf[8];

        /*
         * No valid syndrome? Ask userspace for help if it has
         * volunteered to do so, and bail out otherwise.
         */
        if (!kvm_vcpu_dabt_isvalid(vcpu)) {
                if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
                        run->exit_reason = KVM_EXIT_ARM_NISV;
                        run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
                        run->arm_nisv.fault_ipa = fault_ipa;
                        return 0;
                }

                kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
                return -ENOSYS;
        }

        /*
         * Prepare the MMIO operation. First decode the syndrome data we get
         * from the CPU. Then check whether some in-kernel emulation feels
         * responsible, otherwise let user space do its magic.
         */
        is_write = kvm_vcpu_dabt_iswrite(vcpu);
        len = kvm_vcpu_dabt_get_as(vcpu);
        rt = kvm_vcpu_dabt_get_rd(vcpu);

        if (is_write) {
                data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
                                               len);

                trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
                kvm_mmio_write_buf(data_buf, len, data);

                ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                       data_buf);
        } else {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
                               fault_ipa, NULL);

                ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
                                      data_buf);
        }

        /* Now prepare kvm_run for the potential return to userland. */
        run->mmio.is_write      = is_write;
        run->mmio.phys_addr     = fault_ipa;
        run->mmio.len           = len;
        vcpu->mmio_needed       = 1;

        if (!ret) {
                /* We handled the access successfully in the kernel. */
                if (!is_write)
                        memcpy(run->mmio.data, data_buf, len);
                vcpu->stat.mmio_exit_kernel++;
                kvm_handle_mmio_return(vcpu);
                return 1;
        }

        if (is_write)
                memcpy(run->mmio.data, data_buf, len);
        vcpu->stat.mmio_exit_user++;
        run->exit_reason        = KVM_EXIT_MMIO;
        return 0;
}

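For reference, a minimal sketch of the user-space side of the KVM_EXIT_MMIO protocol that io_mem_abort() and kvm_handle_mmio_return() implement: the VMM emulates the access and, for reads, fills run->mmio.data before re-entering the guest, at which point kvm_handle_mmio_return() completes the load. The device-model hooks vmm_mmio_read()/vmm_mmio_write() are hypothetical placeholders, not part of the KVM API.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical VMM device-model hooks; not part of the kernel API. */
extern void vmm_mmio_read(__u64 addr, void *data, __u32 len);
extern void vmm_mmio_write(__u64 addr, const void *data, __u32 len);

static void vmm_run_vcpu(int vcpu_fd, struct kvm_run *run)
{
        while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
                if (run->exit_reason != KVM_EXIT_MMIO)
                        continue;       /* other exit reasons elided */

                if (run->mmio.is_write) {
                        /* Guest store: the bytes are already in run->mmio.data. */
                        vmm_mmio_write(run->mmio.phys_addr, run->mmio.data,
                                       run->mmio.len);
                } else {
                        /*
                         * Guest load: fill run->mmio.data so that
                         * kvm_handle_mmio_return() can complete the load on
                         * the next KVM_RUN.
                         */
                        vmm_mmio_read(run->mmio.phys_addr, run->mmio.data,
                                      run->mmio.len);
                }
        }
}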