// SPDX-License-Identifier: GPL-2.0
/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled with the pvops support disabled.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>

#include <asm/paravirt.h>
#include <asm/qspinlock.h>

#include <xen/events.h>

#include "xen-ops.h"

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;

static void xen_qlock_kick(int cpu)
{
        int irq = per_cpu(lock_kicker_irq, cpu);

        /* Don't kick if the target CPU's kicker interrupt is not initialized. */
        if (irq == -1)
                return;

        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

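/*
 * Halt the current CPU and release it back to the hypervisor; the vCPU is
 * woken up again when the lock holder kicks it via xen_qlock_kick().
 */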
static void xen_qlock_wait(u8 *byte, u8 val)
{
        int irq = __this_cpu_read(lock_kicker_irq);
        atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);

        /* If kicker interrupts are not initialized yet, just spin. */
        if (irq == -1 || in_nmi())
                return;

        /* Detect reentry. */
        atomic_inc(nest_cnt);

        /* If the irq is already pending and this is not a nested call, clear it. */
        if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
                xen_clear_irq_pending(irq);
        } else if (READ_ONCE(*byte) == val) {
                /* Block until the irq becomes pending (or a spurious wakeup). */
                xen_poll_irq(irq);
        }

        atomic_dec(nest_cnt);
}

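/*
 * The kicker IRQ is never meant to be delivered: it is bound and then
 * immediately disabled, and only polled/cleared above, so running the
 * handler indicates a bug.
 */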
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        BUG();
        return IRQ_HANDLED;
}

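/*
 * Called when a CPU is brought up: bind the per-CPU "spinlockN" IPI event
 * channel used to kick the CPU out of xen_poll_irq().
 */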
void xen_init_lock_cpu(int cpu)
{
        int irq;
        char *name;

        if (!xen_pvspin) {
                if (cpu == 0)
                        static_branch_disable(&virt_spin_lock_key);
                return;
        }

        WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));

        name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
        irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
                                     cpu,
                                     dummy_handler,
                                     IRQF_PERCPU|IRQF_NOBALANCING,
                                     name,
                                     NULL);

        if (irq >= 0) {
                disable_irq(irq); /* make sure it's never delivered */
                per_cpu(lock_kicker_irq, cpu) = irq;
                per_cpu(irq_name, cpu) = name;
        }

        printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

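/*
 * Called when a CPU is taken down: undo xen_init_lock_cpu() by unbinding
 * the kicker IRQ and freeing its name.
 */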
void xen_uninit_lock_cpu(int cpu)
{
        if (!xen_pvspin)
                return;

        unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
        per_cpu(lock_kicker_irq, cpu) = -1;
        kfree(per_cpu(irq_name, cpu));
        per_cpu(irq_name, cpu) = NULL;
}

PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

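/*
 * Install the PV qspinlock operations: route pv_ops.lock through the generic
 * __pv_queued_spin_lock/unlock slow paths, with Xen supplying the wait
 * (halt in xen_poll_irq()) and kick (event-channel IPI) primitives.
 */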
void __init xen_init_spinlocks(void)
{
        /* PV spinlocks buy nothing on a guest with a single vCPU. */
        if (num_possible_cpus() == 1)
                xen_pvspin = false;

        if (!xen_pvspin) {
                printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = xen_qlock_wait;
        pv_ops.lock.kick = xen_qlock_kick;
        pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}

/* The "xen_nopvspin" kernel parameter disables the PV spinlock optimization. */
static __init int xen_parse_nopvspin(char *arg)
{
        xen_pvspin = false;
        return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);