linux/arch/x86/xen/spinlock.c
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

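/*
 * Per-CPU state: lock_kicker_irq is the event-channel IRQ a waiting
 * vCPU polls on (and is kicked through), irq_name backs its
 * /proc/interrupts entry, and xen_pvspin is cleared by the
 * "xen_nopvspin" command-line parameter handled at the bottom of
 * this file.
 */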
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

#include <asm/qspinlock.h>

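/*
 * Rough sketch of how the two hooks below pair up (simplified; the
 * real slowpath lives in kernel/locking/qspinlock_paravirt.h):
 *
 *   waiter:   pv_wait(&lock->locked, _Q_SLOW_VAL)  ->  xen_qlock_wait()
 *   unlocker: pv_kick(node->cpu)                   ->  xen_qlock_kick()
 *
 * The waiter blocks in the hypervisor via xen_poll_irq() until the
 * unlocker sends XEN_SPIN_UNLOCK_VECTOR to its kicker event channel.
 */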
static void xen_qlock_kick(int cpu)
{
	int irq = per_cpu(lock_kicker_irq, cpu);

	/* Don't kick if the target's kicker interrupt is not initialized. */
	if (irq == -1)
		return;

	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If the kicker interrupt is not initialized yet, just spin. */
	if (irq == -1)
		return;

	/* Clear any pending wakeup event before re-checking the lock byte. */
	xen_clear_irq_pending(irq);
	barrier();

	/*
	 * We check the byte value after clearing the pending IRQ to make
	 * sure that we won't miss a wakeup event because of the clearing.
	 *
	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
	 * So it is effectively a memory barrier for x86.
	 */
	if (READ_ONCE(*byte) != val)
		return;

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
}

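/*
 * The kicker IRQ is bound only so its event channel can be polled and
 * kicked; xen_init_lock_cpu() disables it right after binding, so this
 * handler should never actually run.
 */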
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}
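/*
 * Bind the per-CPU kicker event channel; called from the Xen SMP
 * bringup code for each CPU before it can take part in the pv
 * spinlock wait/kick protocol.
 */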
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk(KERN_DEBUG "cpu %d spinlock event irq %d\n", cpu, irq);
}

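/*
 * Tear down the kicker IRQ when a CPU goes offline; the counterpart to
 * xen_init_lock_cpu() above.
 */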
void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}
/*
 * Our init of PV spinlocks is split into two init functions because we
 * use both paravirt patching and jump-label patching, and all of it has
 * to happen before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

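	/*
	 * Size the hash table the pv qspinlock slowpath uses to find
	 * the waiting node for a lock once the lock byte has been set
	 * to _Q_SLOW_VAL.
	 */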
	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = xen_qlock_wait;
	pv_lock_ops.kick = xen_qlock_kick;
}

/*
 * The jump_label init code needs to happen _after_ the jump labels are
 * enabled and before SMP is started. Hence we use a pre-SMP initcall
 * level for the init. We cannot do it in xen_init_spinlocks as that is
 * done before jump labels are activated.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);

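/*
 * Booting the guest with "xen_nopvspin" on the kernel command line
 * skips all of the above and leaves the native qspinlock code in
 * place.
 */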
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);