linux/arch/powerpc/kernel/kvm_emul.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/asm-compat.h>

#define KVM_MAGIC_PAGE          (-4096)
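/*
 * Note: the magic (shared) page sits in the last 4 KiB of the guest's
 * effective address space, so the negative KVM_MAGIC_PAGE displacement
 * with a base of (0) -- which D-form loads/stores treat as the literal
 * value zero, not the contents of r0 -- addresses its fields directly.
 */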

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)   ld      reg, (offs)(reg2)
#define STL64(reg, offs, reg2)  std     reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)   lwz     reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)  stw     reg, (offs + 4)(reg2)
#endif
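/*
 * The MSR and critical fields in the shared page are 64 bits wide and
 * stored big-endian, so 32-bit kernels only access the low word, hence
 * the "+ 4" displacement in the 32-bit variants above.
 */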

#define SCRATCH_SAVE                                                    \
        /* Enable critical section. We are critical if                  \
           shared->critical == r1 */                                    \
        STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);              \
                                                                        \
        /* Save state */                                                \
        PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
        mfcr    r31;                                                    \
        stw     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE                                                 \
        /* Restore state */                                             \
        PPC_LL  r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);          \
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);          \
        mtcr    r30;                                                    \
        PPC_LL  r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);          \
                                                                        \
        /* Disable critical section. We are critical if                 \
           shared->critical == r1 and r2 is always != r1 */             \
        STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
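
/*
 * Note: the SCRATCH_SAVE/SCRATCH_RESTORE sequences above replace single
 * instructions in arbitrary kernel context, so they deliberately avoid the
 * stack: the two working registers (r30/r31) and CR are spilled into the
 * scratch slots on the magic page instead.
 */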

.global kvm_template_start
kvm_template_start:
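
/*
 * Everything between kvm_template_start and kvm_template_end is a set of
 * templates that the guest-side patching code (arch/powerpc/kernel/kvm.c)
 * copies in place of individual privileged instructions.  The *_offs and
 * *_len symbols below are expressed in 32-bit instruction words and tell
 * the patcher where to rewrite the register operand, where to drop in the
 * original (trapping) instruction, and where to fix up the branch back to
 * the instruction following the patched one.
 */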

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

        SCRATCH_SAVE

        /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        lis     r30, (~(MSR_EE | MSR_RI))@h
        ori     r30, r30, (~(MSR_EE | MSR_RI))@l
        and     r31, r31, r30

        /* OR the register's (MSR_EE|MSR_RI) bits into the MSR */
kvm_emulate_mtmsrd_reg:
        ori     r30, r0, 0
        andi.   r30, r30, (MSR_EE|MSR_RI)
        or      r31, r31, r30

        /* Put MSR back into magic page */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_check

        /* Check if we may trigger an interrupt */
        andi.   r30, r30, MSR_EE
        beq     no_check

        SCRATCH_RESTORE

        /* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
        tlbsync
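        /*
         * The tlbsync above is only a placeholder: the patcher copies the
         * original mtmsrd here, so this path executes the real (trapping)
         * instruction and lets the host deliver the pending interrupt.
         */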

        b       kvm_emulate_mtmsrd_branch

no_check:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsrd_branch:
        b       .
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
        .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
        .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
        .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
        .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
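/*
 * Only MSR[EE] and MSR[RI] are "safe": they can be updated purely in the
 * cached MSR on the magic page.  If a mtmsr changes any other bit, the
 * original (trapping) instruction has to run so the hypervisor sees it.
 */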

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
        ori     r30, r0, 0
        xor     r31, r30, r31

        /* Check if we need to really do mtmsr */
        LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
        and.    r31, r31, r30

        /* No critical bits changed? Maybe we can stay in the guest. */
        beq     maybe_stay_in_guest

do_mtmsr:

        SCRATCH_RESTORE

        /* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
        mtmsr   r0

        b       kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

        /* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
        ori     r30, r0, 0

        /* Put MSR into magic page because we don't call mtmsr */
        STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Check if we have to fetch an interrupt */
        lwz     r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r31, 0
        beq+    no_mtmsr

        /* Check if we may trigger an interrupt */
        andi.   r31, r30, MSR_EE
        bne     do_mtmsr

no_mtmsr:

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtmsr_branch:
        b       .
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
        .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
        .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
        .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
        .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
        .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

/* also used for wrteei 1 */
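/*
 * For wrteei 1 the patcher rewrites the instruction at kvm_emulate_wrtee_reg
 * so that r30 is loaded with MSR_EE instead of being copied from a register,
 * which lets the same template handle both "wrtee rX" and "wrteei 1".
 */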
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
        ori     r30, r0, 0
        rlwimi  r31, r30, 0, MSR_EE

        /*
         * If MSR[EE] is now set, check for a pending interrupt.
         * We could skip this if MSR[EE] was already on, but that
         * should be rare, so don't bother.
         */
        andi.   r30, r30, MSR_EE

        /* Put MSR into magic page because we don't call wrtee */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        beq     no_wrtee

        /* Check if we have to fetch an interrupt */
        lwz     r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
        cmpwi   r30, 0
        bne     do_wrtee

no_wrtee:
        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrtee_branch:
        b       .

do_wrtee:
        SCRATCH_RESTORE

        /* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
        wrtee   r0

        b       kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
        .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
        .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
        .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
        .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
        SCRATCH_SAVE

        /* Fetch old MSR in r31 */
        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        /* Remove MSR_EE from old MSR */
        rlwinm  r31, r31, 0, ~MSR_EE
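        /*
         * Clearing MSR[EE] can never make a pending interrupt deliverable,
         * so unlike wrtee/wrteei 1 there is no need to check for a pending
         * interrupt or drop back into the hypervisor here.
         */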

        /* Write new MSR value back */
        STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_wrteei_0_branch:
        b       .
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
        .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
        .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

        SCRATCH_SAVE

        LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
        andi.   r31, r31, MSR_DR | MSR_IR
        beq     kvm_emulate_mtsrin_reg1
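        /*
         * MSR[IR] or MSR[DR] is set: address translation is active, so fall
         * through and execute the original (trapping) mtsrin to let the host
         * handle the segment register update.  With translation off, the new
         * value is only cached in the magic page below.
         */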

        SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
        nop
        b       kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
        /* SR index from rX's top 4 bits, scaled to a word offset: (rX >> 26) & ~3 */
        rlwinm  r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
        stw     r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

        SCRATCH_RESTORE

        /* Go back to caller */
kvm_emulate_mtsrin_branch:
        b       .
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
        .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
        .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
        .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
        .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
        .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: