#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__
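
/*
 * Simple spin lock and reader-writer lock operations for powerpc
 * (arch_spin_*, arch_read_*, arch_write_*), built on lwarx/stwcx.
 * reservations, with yield-to-hypervisor support on shared-processor
 * (SPLPAR) configurations.
 */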
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
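/*
 * On 64-bit, a held lock contains a token identifying the owning CPU
 * (taken from its paca), so a waiter can direct a hypervisor yield at
 * the holder.  On 32-bit a held lock simply contains 1.
 */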
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

#ifdef CONFIG_PPC_PSERIES
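/*
 * A virtual CPU whose lppaca yield_count is odd is currently preempted
 * by the hypervisor; this is only meaningful on shared-processor LPARs.
 */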
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}
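
/*
 * __arch_spin_trylock() returns the old value of the lock word, so the
 * caller got the lock if the return value is 0.
 */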
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __arch_spin_trylock(lock) == 0;
}
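
/*
 * On shared-processor (SPLPAR) systems a physical CPU is multiplexed
 * between several virtual CPUs, so there is no point spinning while the
 * lock holder is not actually running.  splpar_spin_yield() and
 * splpar_rw_yield() use the CPU token stored in the lock to ask the
 * hypervisor to give the rest of our timeslice to the holder.
 */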
#if defined(CONFIG_PPC_SPLPAR)
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else
static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
#endif

static inline bool is_shared_processor(void)
{
/*
 * The lppaca is only available on pseries; guard the access so other
 * platforms that include this common header still compile.
 */
#ifdef CONFIG_PPC_PSERIES
	return (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
		lppaca_shared_proc(local_paca->lppaca_ptr));
#else
	return false;
#endif
}

static inline void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
	if (is_shared_processor())
		splpar_rw_yield(lock);
	else
		barrier();
}
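
/*
 * arch_spin_lock(): try the atomic acquire once, then spin at low SMT
 * priority (HMT_low), yielding to the hypervisor on shared processors,
 * until the lock word reads as free; then retry the acquire.
 */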
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
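
/*
 * arch_spin_lock_flags(): as arch_spin_lock(), but restore the caller's
 * interrupt state (flags) while busy-waiting and disable interrupts again
 * before retrying the acquire.
 */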
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
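
/*
 * Read-write spinlocks: multiple readers may hold the lock concurrently,
 * but only one writer.  The lock word counts readers (positive) and goes
 * negative while a writer holds it.
 */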
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif
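
/*
 * __arch_read_trylock() returns the old reader count + 1, so the caller
 * got a read lock if the return value is > 0.
 */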
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
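
/*
 * __arch_write_trylock() returns the old value of the lock word, so the
 * caller got the write lock if the return value is 0.
 */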
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (is_shared_processor())
				splpar_rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	spin_yield(lock)
#define arch_read_relax(lock)	rw_yield(lock)
#define arch_write_relax(lock)	rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */