linux/arch/x86/xen/spinlock.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>
#include <asm/qspinlock.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

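/* Per-cpu event channel IRQ used to kick a waiting vCPU; -1 = not set up. */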
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static bool xen_pvspin = true;

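/*
 * Wake the target vCPU: xen_qlock_wait() on @cpu blocks in xen_poll_irq()
 * on its per-cpu event channel, and this IPI makes that poll return.
 */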
static void xen_qlock_kick(int cpu)
{
        int irq = per_cpu(lock_kicker_irq, cpu);

        /* Don't kick if the target's kicker interrupt is not initialized. */
        if (irq == -1)
                return;

        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

/*
 * Halt the current CPU & release it back to the host
 */
static void xen_qlock_wait(u8 *byte, u8 val)
{
        int irq = __this_cpu_read(lock_kicker_irq);

        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1)
                return;

        /* clear pending */
        xen_clear_irq_pending(irq);
        barrier();

        /*
         * We check the byte value after clearing pending IRQ to make sure
         * that we won't miss a wakeup event because of the clearing.
         *
         * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
         * So it is effectively a memory barrier for x86.
         */
        if (READ_ONCE(*byte) != val)
                return;

        /*
         * If an interrupt happens here, it will leave the wakeup irq
         * pending, which will cause xen_poll_irq() to return
         * immediately.
         */

        /* Block until irq becomes pending (or perhaps a spurious wakeup) */
        xen_poll_irq(irq);
}

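/*
 * This handler should never actually run: the IRQ exists only so that
 * xen_poll_irq() has an event channel to wait on, and it is kept disabled
 * in xen_init_lock_cpu(), hence the BUG() below.
 */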
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
        int irq;
        char *name;

        if (!xen_pvspin) {
                if (cpu == 0)
                        static_branch_disable(&virt_spin_lock_key);
                return;
        }

        WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));

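        /*
         * The name string must outlive the IRQ binding; it is freed in
         * xen_uninit_lock_cpu() after the handler is unbound.
         */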
        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
                per_cpu(irq_name, cpu) = name;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
        if (!xen_pvspin)
                return;

        unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
        per_cpu(lock_kicker_irq, cpu) = -1;
        kfree(per_cpu(irq_name, cpu));
        per_cpu(irq_name, cpu) = NULL;
}

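/*
 * Generate the callee-save thunk for xen_vcpu_stolen() so it can be
 * installed as the vcpu_is_preempted hook via PV_CALLEE_SAVE() below.
 */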
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

/*
 * Our init of PV spinlocks is split into two init functions because we
 * use both paravirt patching and jump-label patching, and all of it has
 * to be done before the SMP code is invoked.
 *
 * The paravirt patching needs to be done _before_ the alternative asm code
 * is started, otherwise we would not patch the core kernel code.
 */
void __init xen_init_spinlocks(void)
{

        if (!xen_pvspin) {
                printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = xen_qlock_wait;
        pv_lock_ops.kick = xen_qlock_kick;
        pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

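/*
 * "xen_nopvspin" on the kernel command line disables the PV spinlock
 * paths; xen_init_lock_cpu() then also clears virt_spin_lock_key, so the
 * native qspinlock is used unmodified.
 */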
static __init int xen_parse_nopvspin(char *arg)
{
        xen_pvspin = false;
        return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);