#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__
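
/*
 * Simple spin lock operations for powerpc, built on lwarx/stwcx.
 * (load-and-reserve / store-conditional) sequences.  On shared-processor
 * LPARs a spinning CPU can ask the hypervisor to run the lock holder
 * instead of busy-waiting (see SHARED_PROCESSOR below).
 */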
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

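/*
 * On powerpc an unlock (lwsync release) followed by a lock (isync-based
 * acquire) is not guaranteed to act as a full memory barrier, so
 * smp_mb__after_unlock_lock() has to be a full smp_mb().
 */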
#define smp_mb__after_unlock_lock()	smp_mb()

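/*
 * A held lock must contain a non-zero value.  On 64-bit SMP we use a
 * token read from the paca (the 0x8000 lock_token together with the
 * paca_index), so the lock word also identifies which CPU holds the
 * lock; the shared-processor yield code uses this to tell the
 * hypervisor whom to run.  On 32-bit a plain 1 is enough.
 */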
#ifdef CONFIG_PPC64
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif

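/*
 * On 64-bit SMP the MMIO accessors (see asm/io.h) set paca->io_sync.
 * Taking a lock clears the flag; if it is still set at unlock time we
 * issue a full mb() so that MMIO done while holding the lock completes
 * before the lock is seen as free by other CPUs.
 */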
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {					\
		if (unlikely(get_paca()->io_sync)) {		\
			mb();					\
			get_paca()->io_sync = 0;		\
		}						\
	} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

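/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */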
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

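/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is little point spinning on a lock if the holder isn't
 * currently running on a physical processor.  Instead we detect
 * that situation and ask the hypervisor to give the rest of our
 * timeslice to the lock holder.
 */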
#if defined(CONFIG_PPC_SPLPAR)
#define SHARED_PROCESSOR	(lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else
#define __spin_yield(x)		barrier()
#define __rw_yield(x)		barrier()
#define SHARED_PROCESSOR	0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

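/*
 * Like arch_spin_lock(), but while we spin we drop back to the
 * interrupt state the caller had before taking the lock (flags), so
 * pending interrupts can be serviced; interrupts are disabled again
 * before the trylock is retried.
 */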
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

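/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The lock word holds a count: zero means unlocked, a positive value is
 * the number of readers, and a negative value (the write token) means
 * write-locked.
 */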
#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)		(!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

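/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */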
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

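/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */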
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */