/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

#define _Q_PENDING_LOOPS	(1 << 9)

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

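/*
 * Dispatch between the native and paravirt slowpaths: on shared-processor
 * (SPLPAR) guests the paravirt variant lets a waiter yield its virtual CPU
 * to the hypervisor instead of spinning.
 */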
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

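/*
 * Native unlock is a plain release store of the locked byte; the paravirt
 * unlock must go through __pv_queued_spin_unlock() so that a waiter sleeping
 * in pv_wait() gets kicked.
 */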
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif

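/*
 * Fast path: try to take the lock by cmpxchg'ing the lock word from 0 to
 * _Q_LOCKED_VAL. On failure, 'val' holds the observed lock word and is
 * passed to the (native or paravirt) slowpath.
 */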
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

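/*
 * Hooks consumed by the generic paravirt qspinlock slowpath
 * (kernel/locking/qspinlock_paravirt.h).
 */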
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15)

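/*
 * A queued waiter spins up to SPIN_THRESHOLD times before the generic code
 * calls pv_wait(): re-check the wait byte and, if it still matches, yield
 * this vCPU to the hypervisor.
 */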
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}

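/* Wake the target CPU that went (or may be going) to sleep in pv_wait(). */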
static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}

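/*
 * Allocate the hash table the generic paravirt code uses to map a lock to
 * the CPU that must be kicked at unlock time.
 */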
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif
/*
 * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
 * which was found to have performance problems if implemented with
 * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
 * generic version instead.
 */
#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */