linux/arch/x86/xen/irq.c
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
        (void)HYPERVISOR_xen_version(0, NULL);
}

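/*
 * Read this vCPU's virtual interrupt-enable state and return it in
 * EFLAGS.IF form.  Xen keeps the state in the per-vCPU upcall mask
 * rather than in the real EFLAGS register.
 */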
static unsigned long xen_save_fl(void)
{
        struct vcpu_info *vcpu;
        unsigned long flags;

        vcpu = this_cpu_read(xen_vcpu);

        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;

        /* convert to IF type flag
           -0 -> 0x00000000
           -1 -> 0xffffffff
        */
        return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

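/*
 * Set this vCPU's virtual interrupt-enable state from an EFLAGS.IF
 * style value.  When events are being unmasked and one is already
 * pending, poke Xen with a hypercall so the upcall gets delivered;
 * an event that became pending while masked will not re-raise the
 * callback by itself.
 */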
static void xen_restore_fl(unsigned long flags)
{
        struct vcpu_info *vcpu;

        /* convert from IF type flag */
        flags = !(flags & X86_EFLAGS_IF);

        /* See xen_irq_enable() for why preemption must be disabled. */
        preempt_disable();
        vcpu = this_cpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;

        if (flags == 0) {
                barrier(); /* unmask then check (avoid races) */
                if (unlikely(vcpu->evtchn_upcall_pending))
                        xen_force_evtchn_callback();
                preempt_enable();
        } else
                preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

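/*
 * Mask event delivery for this vCPU.  A plain store to the upcall
 * mask is sufficient; no hypercall is needed to "disable interrupts".
 */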
static void xen_irq_disable(void)
{
        /* There's a one instruction preempt window here.  We need to
           make sure we don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
        this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

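/*
 * Unmask event delivery for this vCPU and flush any event that became
 * pending while delivery was masked.
 */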
static void xen_irq_enable(void)
{
        struct vcpu_info *vcpu;

        /*
         * We may be preempted as soon as vcpu->evtchn_upcall_mask is
         * cleared, so disable preemption to ensure we check for
         * events on the VCPU we are still running on.
         */
        preempt_disable();

        vcpu = this_cpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;

        /* Doesn't matter if we get preempted here, because any
           pending event will get dealt with anyway. */

        barrier(); /* unmask then check (avoid races) */
        if (unlikely(vcpu->evtchn_upcall_pending))
                xen_force_evtchn_callback();

        preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

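/*
 * Halt with events enabled: block this vCPU in the hypervisor until
 * an event channel becomes pending.
 */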
static void xen_safe_halt(void)
{
        /* Blocking includes an implicit local_irq_enable(). */
        if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
                BUG();
}

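/*
 * Halt the vCPU.  SCHEDOP_block would re-enable event delivery, so
 * with events masked the vCPU is taken offline via VCPUOP_down
 * instead, mirroring a native hlt with interrupts disabled.
 */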
static void xen_halt(void)
{
        if (irqs_disabled())
                HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        else
                xen_safe_halt();
}

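/*
 * Paravirt replacements for the native IRQ-flag primitives (pushf/popf
 * IF handling, cli, sti, and hlt), all routed through the Xen per-vCPU
 * upcall mask instead of the real EFLAGS.IF.
 */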
static const struct pv_irq_ops xen_irq_ops __initconst = {
        .save_fl = PV_CALLEE_SAVE(xen_save_fl),
        .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
        .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
        .irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

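/*
 * Install the Xen IRQ ops and hook the interrupt-controller setup so
 * that xen_init_IRQ() runs in place of the native init routine.
 */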
void __init xen_init_irq_ops(void)
{
        pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
}