/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old = READ_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* only the caller that wins the cmpxchg resets the stats */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

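/*
 * Per-CPU record of the lock and ticket this CPU is currently blocked on,
 * so that the unlocker can find and kick the right waiter.
 */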
struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;

static bool xen_pvspin = true;
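
/*
 * Ticket-lock slowpath: record which ticket of which lock this CPU is
 * waiting for, mark the lock as having waiters, and block on the per-CPU
 * kicker event channel until the unlocker sends a wakeup (or the lock
 * becomes free while we are setting up).
 */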
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	__ticket_t head;
	unsigned long flags;

	/* If kicker interrupts not initialized yet, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * Publish the (lock, want) pair this CPU is waiting for.  Clear
	 * w->lock first and order the writes with smp_wmb() so that the
	 * unlocker never sees the new lock paired with a stale ticket.
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* advertise this CPU as a waiter on this lock */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check the lock once pending is cleared */
	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/* make sure the slowpath flag is written before we read the head */
	smp_mb__after_atomic();

	/*
	 * Check again to make sure the lock didn't become free while we
	 * weren't looking.
	 */
	head = READ_ONCE(lock->tickets.head);
	if (__tickets_equal(head, want)) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt fires here it leaves the wakeup irq pending,
	 * which makes the xen_poll_irq() below return immediately.
	 */

	/* Block until irq becomes pending (or a spurious wakeup) */
	xen_poll_irq(irq);
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irq_this_cpu(irq);
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);

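/*
 * Unlock slowpath: find the CPU that is waiting for ticket 'next' on this
 * lock and kick it via its spinlock IPI event channel.
 */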
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (READ_ONCE(w->lock) == lock &&
		    READ_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

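/*
 * The kicker IRQ is bound disabled and only ever consumed via
 * xen_poll_irq(), so this handler should never actually run.
 */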
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

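/*
 * Bind a per-CPU "spinlock kicker" IPI event channel for this CPU.  It is
 * left disabled; it is only used as a wakeup source for xen_poll_irq().
 */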
void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	if (!xen_pvspin)
		return;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk("cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	if (!xen_pvspin)
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

/*
 * PV spinlock initialisation is split in two.  The pv_lock_ops hooks are
 * installed here, before paravirt/alternative patching and SMP bringup run,
 * so the patched call sites pick them up.  The jump-label switch that
 * actually enables the ticket-lock slowpaths is flipped later, from the
 * early initcall below, because jump labels are not yet initialised when
 * this function runs.
 */
void __init xen_init_spinlocks(void)
{
	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}
	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
}

/*
 * The static key cannot be flipped from xen_init_spinlocks(), which runs
 * before jump labels are initialised, so it is enabled here from a pre-SMP
 * early initcall instead.
 */
static __init int xen_init_spinlocks_jump(void)
{
	if (!xen_pvspin)
		return 0;

	if (!xen_domain())
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	return 0;
}
early_initcall(xen_init_spinlocks_jump);

static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	if (!xen_pvspin)
		return 0;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */