/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/barrier.h>
#include <asm/processor.h>

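/*
 * The per-CPU lock value (~CPU number, see arch_spin_lockval() below) is
 * kept in the lowcore, so taking a lock tags it with its owner without
 * any extra computation.
 */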
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

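/*
 * Number of attempts the out-of-line wait and retry loops make before
 * yielding; tunable with the "spin_retry=" kernel parameter.
 */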
extern int spin_retry;

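/*
 * On s390 GCC compiles __sync_bool_compare_and_swap() to a single COMPARE
 * AND SWAP (CS) instruction; CS is serializing, so the operation also
 * acts as a full memory barrier.
 */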
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
	return __sync_bool_compare_and_swap(lock, old, new);
}

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

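/*
 * These arch_* primitives sit underneath the generic spinlock API and are
 * not used directly.  A minimal sketch of use through the generic layer
 * (the lock name is just an example):
 *
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	spin_lock(&example_lock);	(reaches arch_spin_lock() here)
 *	... critical section ...
 *	spin_unlock(&example_lock);	(reaches arch_spin_unlock())
 */
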
void arch_lock_relax(unsigned int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

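/*
 * arch_lock_relax() (arch/s390/lib/spinlock.c) yields the current CPU to
 * the lock holder when the holder's virtual CPU is not scheduled by the
 * hypervisor, instead of burning cycles in the spin loop.
 */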
static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

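/*
 * The bitwise NOT guarantees a nonzero lock value even for CPU 0, keeping
 * 0 reserved for "unlocked".
 */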
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return ACCESS_ONCE(lp->lock) != 0;
}

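/*
 * Read the lock word before attempting the CAS: a plain load leaves the
 * cache line shared, so waiters do not bounce it away from the owner
 * unless the lock actually looks free.
 */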
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

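/*
 * s390 stores are observed in program order, so the plain "st" below is
 * sufficient to release the lock; the "memory" clobber only keeps the
 * compiler from moving accesses out of the critical section.
 */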
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(unsigned int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

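/*
 * rwlock word layout: bit 31 (0x80000000) is the writer bit, the lower
 * 31 bits count the readers.  0 means unlocked; a negative value, read
 * as a signed int, means a writer holds or is acquiring the lock.
 */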
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely((int) old >= 0 &&
		      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	unsigned int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

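/*
 * The z196 interlocked-access facility provides LOAD AND OR/AND/ADD
 * (lao/lan/laa): a single instruction that updates the word atomically
 * and returns the old value, so no compare-and-swap retry loop is needed.
 * The "bcr 14,0" in __RAW_LOCK is the fast serialization introduced with
 * the same machine generation and provides the acquire barrier; the
 * unlock macro can do without it thanks to the strong store ordering.
 */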
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	unsigned int old_val;				\
							\
	typecheck(unsigned int *, ptr);			\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if ((int) old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

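/*
 * LAO sets the writer bit unconditionally; a nonzero old value means the
 * lock was contended, and the previous value is handed to the wait path
 * so it can tell whether this CPU already owns the writer bit.
 */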
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

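/*
 * Without the interlocked-access facility the reader count is dropped in
 * a compare-and-swap loop: reread the word and retry until no other CPU
 * has changed it in between.
 */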
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(unsigned int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

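/*
 * rw->owner is only filled in by writers, so the relax functions can only
 * yield to a write-lock holder; while readers hold the lock rw->owner is
 * 0 and arch_lock_relax() returns immediately.
 */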
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */