linux/arch/mips/kvm/dyntrans.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/cacheflush.h>

#include "commpage.h"

/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu:       Virtual CPU.
 * @opc:        PC of instruction to replace.
 * @replace:    Instruction to write.
 *
 * Return:      0 on success, -EFAULT if @opc is not a guest KSEG0/KSEG23
 *              address.
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
                                  union mips_instruction replace)
{
        unsigned long paddr, flags;
        void *vaddr;

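        /*
         * For guest KSEG0 addresses, look up the host physical address of
         * the backing page, patch the instruction through a temporary
         * kernel mapping, and flush the icache for the patched line.
         */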
        if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
                paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                                                            (unsigned long)opc);
                vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
                vaddr += paddr & ~PAGE_MASK;
                memcpy(vaddr, (void *)&replace, sizeof(u32));
                local_flush_icache_range((unsigned long)vaddr,
                                         (unsigned long)vaddr + 32);
                kunmap_atomic(vaddr);
        } else if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG23) {
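                /*
                 * Guest KSEG23 addresses can be written through @opc
                 * directly, so patch in place with interrupts disabled and
                 * flush the icache via the user-address variant.
                 */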
                local_irq_save(flags);
                memcpy((void *)opc, (void *)&replace, sizeof(u32));
                __local_flush_icache_user_range((unsigned long)opc,
                                                (unsigned long)opc + 32);
                local_irq_restore(flags);
        } else {
                kvm_err("%s: Invalid address: %p\n", __func__, opc);
                return -EFAULT;
        }

        return 0;
}

int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
                               struct kvm_vcpu *vcpu)
{
        union mips_instruction nop_inst = { 0 };

        /* Replace the CACHE instruction with a NOP */
        return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
  66
  67/*
  68 * Address based CACHE instructions are transformed into synci(s). A little
  69 * heavy for just D-cache invalidates, but avoids an expensive trap
  70 */
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
                            struct kvm_vcpu *vcpu)
{
        union mips_instruction synci_inst = { 0 };

        synci_inst.i_format.opcode = bcond_op;
        synci_inst.i_format.rs = inst.i_format.rs;
        synci_inst.i_format.rt = synci_op;
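        /*
         * MIPS r6 encodes CACHE in the SPEC3 opcode space with a 9-bit
         * offset, so take the offset from spec3_format there; pre-r6 CACHE
         * keeps the 16-bit i_format offset.
         */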
        if (cpu_has_mips_r6)
                synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
        else
                synci_inst.i_format.simmediate = inst.i_format.simmediate;

        return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}

int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mfc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

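        /*
         * Reads of ErrCtl become "add rt, zero, zero" (the rs/rt fields stay
         * zero), so the guest simply sees 0. Other registers become a load,
         * base $zero, from the COP0 shadow state kept in the commpage.
         */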
        if (rd == MIPS_CP0_ERRCTL && sel == 0) {
                mfc0_inst.r_format.opcode = spec_op;
                mfc0_inst.r_format.rd = inst.c0r_format.rt;
                mfc0_inst.r_format.func = add_op;
        } else {
                mfc0_inst.i_format.opcode = lw_op;
                mfc0_inst.i_format.rt = inst.c0r_format.rt;
                mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                        offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
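                /*
                 * When the shadow COP0 registers are 64-bit and the host is
                 * big-endian, the low 32 bits live at offset 4, so bump the
                 * load offset accordingly.
                 */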
#ifdef CONFIG_CPU_BIG_ENDIAN
                if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                        mfc0_inst.i_format.simmediate |= 4;
#endif
        }

        return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}

int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
                        struct kvm_vcpu *vcpu)
{
        union mips_instruction mtc0_inst = { 0 };
        u32 rd, sel;

        rd = inst.c0r_format.rd;
        sel = inst.c0r_format.sel;

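        /*
         * The guest MTC0 becomes a store of rt into the commpage copy of
         * the COP0 register, so the write is captured without trapping.
         */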
        mtc0_inst.i_format.opcode = sw_op;
        mtc0_inst.i_format.rt = inst.c0r_format.rt;
        mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
                offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
#ifdef CONFIG_CPU_BIG_ENDIAN
        if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
                mtc0_inst.i_format.simmediate |= 4;
#endif

        return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}