linux/arch/arm/kvm/interrupts.S
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

        .text

__kvm_hyp_code_start:
        .globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI-based mechanism. Until then, we stick to the simple
 * hardware-assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
        push    {r2, r3}

        add     r0, r0, #KVM_VTTBR
        ldrd    r2, r3, [r0]
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
        isb
        mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
        dsb
        isb
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2      @ Back to VMID #0
        isb                             @ Not necessary if followed by eret

        pop     {r2, r3}
        bx      lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
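
/*
 * Usage sketch (hedged, not part of this file): Hyp routines such as the
 * one above are reached from the host via kvm_call_hyp() further down,
 * never by a direct branch, since they must execute in Hyp mode. A minimal
 * C caller, assuming the Stage-2 unmap path in arch/arm/kvm/mmu.c, could
 * look roughly like:
 *
 *      static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 *      {
 *              if (kvm)
 *                      kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *      }
 */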

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
        mov     r0, #0                  @ rn parameter for c15 flushes is SBZ

        /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
        dsb
        isb                             @ Not necessary if followed by eret

        bx      lr
ENDPROC(__kvm_flush_vm_context)
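
/*
 * Usage note (hedged): the C side reaches this through
 * kvm_call_hyp(__kvm_flush_vm_context), typically when the 8-bit VMID
 * space wraps and a new VMID generation begins, so that no stale TLB or
 * icache entries tagged with a recycled VMID can survive.
 */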

/********************************************************************
 *  Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
        @ Save the vcpu pointer
        mcr     p15, 4, vcpu, c13, c0, 2        @ HTPIDR

        save_host_regs

        restore_vgic_state
        restore_timer_state

        @ Store hardware CP15 state and load guest state
        read_cp15_state store_to_vcpu = 0
        write_cp15_state read_from_vcpu = 1

        @ If the host kernel has not been configured with VFPv3 support,
        @ then it is safer to deny the guest the use of it as well.
#ifdef CONFIG_VFPv3
        @ Set FPEXC_EN so the guest doesn't trap floating point instructions
        VFPFMRX r2, FPEXC               @ VMRS
        push    {r2}
        orr     r2, r2, #FPEXC_EN
        VFPFMXR FPEXC, r2               @ VMSR
#endif

        @ Configure Hyp-role
        configure_hyp_role vmentry

        @ Trap coprocessor CRx accesses
        set_hstr vmentry
        set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
        set_hdcr vmentry

        @ Write configured ID register into MIDR alias
        ldr     r1, [vcpu, #VCPU_MIDR]
        mcr     p15, 4, r1, c0, c0, 0

        @ Write guest view of MPIDR into VMPIDR
        ldr     r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
        mcr     p15, 4, r1, c0, c0, 5

        @ Set up guest memory translation
        ldr     r1, [vcpu, #VCPU_KVM]
        add     r1, r1, #KVM_VTTBR
        ldrd    r2, r3, [r1]
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR

        @ We're all done, just restore the GPRs and go to the guest
        restore_guest_regs
        clrex                           @ Clear exclusive monitor
        eret
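
/*
 * From this point the CPU executes the guest. We re-enter Hyp mode, and
 * eventually __kvm_vcpu_return below, only through __kvm_hyp_vector when
 * the guest traps or an interrupt fires.
 */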
__kvm_vcpu_return:
        /*
         * return convention:
         * guest r0, r1, r2 saved on the stack
         * r0: vcpu pointer
         * r1: exception code
         */
        save_guest_regs

        @ Set VMID == 0
        mov     r2, #0
        mov     r3, #0
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR

        @ Don't trap coprocessor accesses for host kernel
        set_hstr vmexit
        set_hdcr vmexit
        set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))

#ifdef CONFIG_VFPv3
        @ Save floating point registers if we let the guest use them.
        tst     r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
        bne     after_vfp_restore

        @ Switch VFP/NEON hardware state to the host's
        add     r7, vcpu, #VCPU_VFP_GUEST
        store_vfp_state r7
        add     r7, vcpu, #VCPU_VFP_HOST
        ldr     r7, [r7]
        restore_vfp_state r7

after_vfp_restore:
        @ Restore FPEXC_EN which we clobbered on entry
        pop     {r2}
        VFPFMXR FPEXC, r2
#endif

        @ Reset Hyp-role
        configure_hyp_role vmexit

        @ Let host read hardware MIDR
        mrc     p15, 0, r2, c0, c0, 0
        mcr     p15, 4, r2, c0, c0, 0

        @ Back to hardware MPIDR
        mrc     p15, 0, r2, c0, c0, 5
        mcr     p15, 4, r2, c0, c0, 5

        @ Store guest CP15 state and restore host state
        read_cp15_state store_to_vcpu = 1
        write_cp15_state read_from_vcpu = 0

        save_timer_state
        save_vgic_state

        restore_host_regs
        clrex                           @ Clear exclusive monitor
        mov     r0, r1                  @ Return the return code
        mov     r1, #0                  @ Clear upper bits in return value
        bx      lr                      @ return to IOCTL
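
/*
 * Consumption sketch (hedged): the exception code returned in r0 above is
 * what the host-side run loop in arch/arm/kvm/arm.c dispatches on, roughly:
 *
 *      ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *      ret = handle_exit(run, vcpu, ret);
 *
 * where handle_exit() maps ARM_EXCEPTION_* values to MMIO emulation, a
 * resumed guest, or an exit to userspace; the exact helper names may
 * differ between kernel versions.
 */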

/********************************************************************
 *  Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C sense, and care
 * must be taken when calling this to ensure parameters are passed in
 * registers only, since the stack will change between the caller and the
 * callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
        hvc     #0
        bx      lr
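
/*
 * Usage sketch (hedged): a typical host-side call passes the Hyp-mapped
 * function first and up to three arguments behind it, e.g.:
 *
 *      u64 ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The HVC above traps to hyp_hvc below, which moves r1-r3 down into r0-r2
 * and branches to the function pointer that arrived in r0.
 */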

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is issued from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are issued from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
        push    {r0-r2}
        mrrc    p15, 6, r0, r1, c2      @ Read VTTBR
        lsr     r1, r1, #16             @ VMID lives in VTTBR[55:48]
        ands    r1, r1, #0xff
        beq     99f                     @ VMID == 0: we were in the host

        load_vcpu                       @ Load VCPU pointer
        .if \exception_code == ARM_EXCEPTION_DATA_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 0   @ HDFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        .if \exception_code == ARM_EXCEPTION_PREF_ABORT
        mrc     p15, 4, r2, c5, c2, 0   @ HSR
        mrc     p15, 4, r1, c6, c0, 2   @ HIFAR
        str     r2, [vcpu, #VCPU_HSR]
        str     r1, [vcpu, #VCPU_HxFAR]
        .endif
        mov     r1, #\exception_code
        b       __kvm_vcpu_return

        @ We were in the host already. Let's craft a panicking return to SVC.
99:     mrs     r2, cpsr
        bic     r2, r2, #MODE_MASK
        orr     r2, r2, #SVC_MODE
THUMB(  orr     r2, r2, #PSR_T_BIT      )
        msr     spsr_cxsf, r2
        mrs     r1, ELR_hyp
        ldr     r2, =BSYM(panic)
        msr     ELR_hyp, r2
        ldr     r0, =\panic_str
        eret
.endm
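
/*
 * Note on the panic path above: r0 is loaded with the format string and r1
 * still holds the faulting ELR_hyp value, so the eret drops into SVC mode
 * running panic(panic_str, elr); panic() being variadic, r1 supplies the
 * %#08x argument of the die strings at the end of this file.
 */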

        .text

        .align 5
__kvm_hyp_vector:
        .globl __kvm_hyp_vector

        @ Hyp-mode exception vector
        W(b)    hyp_reset               @ 0x00: reset
        W(b)    hyp_undef               @ 0x04: undefined instruction
        W(b)    hyp_svc                 @ 0x08: SVC/HVC taken from Hyp mode
        W(b)    hyp_pabt                @ 0x0c: prefetch abort
        W(b)    hyp_dabt                @ 0x10: data abort
        W(b)    hyp_hvc                 @ 0x14: HVC/traps from non-Hyp modes
        W(b)    hyp_irq                 @ 0x18: IRQ
        W(b)    hyp_fiq                 @ 0x1c: FIQ

        .align
hyp_reset:
        b       hyp_reset

        .align
hyp_undef:
        bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

        .align
hyp_svc:
        bad_exception ARM_EXCEPTION_HVC, svc_die_str

        .align
hyp_pabt:
        bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

        .align
hyp_dabt:
        bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

        .align
hyp_hvc:
        /*
         * Getting here is either because of a trap from a guest, or from
         * calling HVC from the host kernel, which means "switch to Hyp mode".
         */
        push    {r0, r1, r2}

        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
        cmp     r0, #HSR_EC_CP_0_13
        beq     switch_to_guest_vfp
#endif
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap              @ Not HVC instr.

        /*
         * Let's check if the HVC came from VMID 0 and allow a simple
         * switch to Hyp mode
         */
        mrrc    p15, 6, r0, r2, c2      @ Read VTTBR
        lsr     r2, r2, #16             @ VMID lives in VTTBR[55:48]
        and     r2, r2, #0xff
        cmp     r2, #0
        bne     guest_trap              @ Guest called HVC

host_switch_to_hyp:
        pop     {r0, r1, r2}

        push    {lr}
        mrs     lr, SPSR
        push    {lr}                    @ Preserve host SPSR and lr across the call

        mov     lr, r0                  @ Function pointer into lr...
        mov     r0, r1                  @ ...and shift args down: r1-r3 -> r0-r2
        mov     r1, r2
        mov     r2, r3

THUMB(  orr     lr, #1)
        blx     lr                      @ Call the HYP function

        pop     {lr}
        msr     SPSR_csxf, lr
        pop     {lr}
        eret

guest_trap:
        load_vcpu                       @ Load VCPU pointer to r0
        str     r1, [vcpu, #VCPU_HSR]

        @ Check if we need the fault information
        lsr     r1, r1, #HSR_EC_SHIFT
        cmp     r1, #HSR_EC_IABT
        mrceq   p15, 4, r2, c6, c0, 2   @ HIFAR
        beq     2f
        cmp     r1, #HSR_EC_DABT
        bne     1f
        mrc     p15, 4, r2, c6, c0, 0   @ HDFAR

2:      str     r2, [vcpu, #VCPU_HxFAR]

        /*
         * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
         *
         * Abort on the stage 2 translation for a memory access from a
         * Non-secure PL1 or PL0 mode:
         *
         * For any Access flag fault or Translation fault, and also for any
         * Permission fault on the stage 2 translation of a memory access
         * made as part of a translation table walk for a stage 1 translation,
         * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
         * is UNKNOWN.
         */

        /*
         * Check for permission fault, and S1PTW: per the rule above, HPFAR
         * only holds a valid IPA if this is not a permission fault, or if
         * it is a permission fault taken on a stage 1 table walk.
         */
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        and     r0, r1, #HSR_FSC_TYPE
        cmp     r0, #FSC_PERM
        tsteq   r1, #(1 << 7)           @ S1PTW
        mrcne   p15, 4, r2, c6, c0, 4   @ HPFAR
        bne     3f

        /* Resolve IPA using the xFAR */
        mcr     p15, 0, r2, c7, c8, 0   @ ATS1CPR
        isb
        mrrc    p15, 0, r0, r1, c7      @ PAR
        tst     r0, #1
        bne     4f                      @ Failed translation
        ubfx    r2, r0, #12, #20        @ Extract PA[31:12] from the low PAR word
        lsl     r2, r2, #4              @ ...into HPFAR layout: IPA[39:12] at [31:4]
        orr     r2, r2, r1, lsl #24     @ Merge PA[39:32] from the high PAR word

3:      load_vcpu                       @ Load VCPU pointer to r0
        str     r2, [r0, #VCPU_HPFAR]

1:      mov     r1, #ARM_EXCEPTION_HVC
        b       __kvm_vcpu_return

4:      pop     {r0, r1, r2}            @ Failed translation, return to guest
        eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
        load_vcpu                       @ Load VCPU pointer to r0
        push    {r3-r7}

        @ NEON/VFP used.  Turn on VFP access.
        set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))

        @ Switch VFP/NEON hardware state to the guest's
        add     r7, r0, #VCPU_VFP_HOST
        ldr     r7, [r7]
        store_vfp_state r7
        add     r7, r0, #VCPU_VFP_GUEST
        restore_vfp_state r7

        pop     {r3-r7}
        pop     {r0-r2}
        eret
#endif
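
/*
 * Design note: VFP switching is lazy. HCPTR traps cp10/cp11 on guest
 * entry, so the first guest VFP/NEON access lands here; the trap is then
 * disabled and the hardware state swapped, letting further guest FP run
 * at native speed. A guest that never touches VFP never pays for the
 * register file copy (see the matching check in __kvm_vcpu_return).
 */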

        .align
hyp_irq:
        push    {r0, r1, r2}
        mov     r1, #ARM_EXCEPTION_IRQ
        load_vcpu                       @ Load VCPU pointer to r0
        b       __kvm_vcpu_return

        .align
hyp_fiq:
        b       hyp_fiq

        .ltorg

__kvm_hyp_code_end:
        .globl  __kvm_hyp_code_end

        .section ".rodata"

@ NUL-terminated: these are handed to panic() as format strings
und_die_str:
        .asciz  "unexpected undefined exception in Hyp mode at: %#08x"
pabt_die_str:
        .asciz  "unexpected prefetch abort in Hyp mode at: %#08x"
dabt_die_str:
        .asciz  "unexpected data abort in Hyp mode at: %#08x"
svc_die_str:
        .asciz  "unexpected HVC/SVC trap in Hyp mode at: %#08x"