/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/jump_label.h>
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
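
/*
 * Worked example (illustrative, not part of the original header): on
 * 64-bit, the paca lays out lock_token (constant 0x8000) and paca_index
 * (the CPU number) so that reading them as one u32 yields 0x800000yy.
 * A lock held by CPU 3 therefore contains 0x80000003; on 32-bit the
 * token is simply 1, and an unlocked lock holds 0 on either variant.
 */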

#ifdef CONFIG_PPC_PSERIES
DECLARE_STATIC_KEY_FALSE(shared_processor);

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!static_branch_unlikely(&shared_processor))
		return false;
	/* An odd yield count means the vCPU is currently preempted */
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
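
/*
 * Minimal usage sketch (illustrative only; "owner" is a hypothetical
 * task pointer): generic code such as the mutex optimistic-spin loop
 * uses this hook to stop spinning once the lock holder's vCPU has lost
 * its physical processor:
 *
 *	while (lock_is_held_by(lock, owner)) {
 *		if (vcpu_is_preempted(task_cpu(owner)))
 *			break;		// holder isn't running; don't spin
 *		cpu_relax();
 *	}
 */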

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}
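
/*
 * Illustrative only: the generic locking layer wraps this as
 * spin_trylock(), so callers would typically write
 *
 *	if (spin_trylock(&my_lock)) {
 *		... critical section ...
 *		spin_unlock(&my_lock);
 *	}
 *
 * rather than using the arch_ entry points directly.
 */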

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline bool is_shared_processor(void)
{
#ifdef CONFIG_PPC_SPLPAR
	return static_branch_unlikely(&shared_processor);
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			/* spin at low SMT priority until the lock looks free */
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		/* re-enable interrupts (per the caller's flags) while we spin */
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags
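
/*
 * Illustrative only ("my_lock" is hypothetical): this variant backs
 * spin_lock_irqsave(); the saved flags let the loop above briefly
 * re-enable interrupts while waiting for a contended lock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, interrupts off ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */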

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write lock, but readers can get non-irqsafe
 * read locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
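
/*
 * Illustrative summary (not from the original source): rw->lock holds
 * the reader count while read-locked (1, 2, 3, ...), a negative token
 * while write-locked (0x800000yy sign-extends to a negative value, as
 * does -1 on 32-bit), and 0 when free.  Hence a read trylock succeeds
 * when the incremented value stays positive, and a write trylock
 * succeeds only when the old value was 0.
 */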

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}
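
/*
 * Usage sketch for the "mixed" irq pattern described above
 * (illustrative only; tasklist_lock is just a well-known example of
 * such a rwlock): readers that may also run in interrupt context can
 * take plain read locks as long as every writer is irq-safe:
 *
 *	read_lock(&tasklist_lock);		// reader, perhaps in irq
 *	...
 *	read_unlock(&tasklist_lock);
 *
 *	write_lock_irq(&tasklist_lock);		// writer must disable irqs
 *	...
 *	write_unlock_irq(&tasklist_lock);
 */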

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()
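
/*
 * Illustrative example (not part of this header): a caller that needs
 * lock acquisition to also order against *earlier* memory accesses
 * upgrades the acquire to a full barrier, as the scheduler does when
 * taking a runqueue lock:
 *
 *	raw_spin_lock(&rq_lock);
 *	smp_mb__after_spinlock();
 *	...
 */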

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */