#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)

#ifdef CONFIG_ARC_HAS_LLSC

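/*
 * Spin while the lock word reads LOCKED, then try to claim it with an
 * llock/scond pair; scond fails (Z flag clear) if another core wrote
 * the word in between, and bnz restarts the whole sequence.
 */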
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();
}
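
/* 1 - lock taken successfully */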
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[slock]]	\n"
	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
	"	scond	%[LOCKED], [%[slock]]	\n"	/* acquire */
	"	bnz	1b			\n"
	"	mov	%[got_it], 1		\n"
	"4:					\n"
	"					\n"
	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [slock]	"r"	(&(lock->slock)),
	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();

	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

	smp_mb();
}
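
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by reader(s).
 */
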
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

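	/*
	 * zero means a writer holds the lock exclusively: deny the reader;
	 * otherwise grant the lock to the first/subsequent reader, i.e.
	 *
	 *	if (rw->counter > 0) {
	 *		rw->counter--;
	 *		ret = 1;
	 *	}
	 */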
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 1b\n"	/* <= 0: spin while write locked */
	"	sub	%[val], %[val], 1	\n"	/* reader lock */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
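
/* 1 - lock taken successfully */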
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
	"	sub	%[val], %[val], 1	\n"	/* counter-- */
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

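	/*
	 * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; grant it only when fully unlocked, i.e.
	 *
	 *	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
	 *		rw->counter = 0;
	 *		ret = 1;
	 *	}
	 */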
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 1b	\n"	/* while !UNLOCKED spin */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();
}
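
/* 1 - lock taken successfully */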
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val, got_it = 0;

	smp_mb();

	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
	"	mov	%[val], %[WR_LOCKED]	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"	/* retry if collided with someone */
	"	mov	%[got_it], 1		\n"
	"					\n"
	"4: ; --- done ---			\n"

	: [val]		"=&r"	(val),
	  [got_it]	"+&r"	(got_it)
	: [rwlock]	"r"	(&(rw->counter)),
	  [UNLOCKED]	"ir"	(__ARCH_RW_LOCK_UNLOCKED__),
	  [WR_LOCKED]	"ir"	(0)
	: "memory", "cc");

	smp_mb();

	return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int val;

	smp_mb();

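	/*
	 * Equivalent to rw->counter++, done atomically via llock/scond.
	 */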
	__asm__ __volatile__(
	"1:	llock	%[val], [%[rwlock]]	\n"
	"	add	%[val], %[val], 1	\n"
	"	scond	%[val], [%[rwlock]]	\n"
	"	bnz	1b			\n"
	"					\n"
	: [val]		"=&r"	(val)
	: [rwlock]	"r"	(&(rw->counter))
	: "memory", "cc");

	smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

	smp_mb();
}

#else	/* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

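	/*
	 * Full barrier before the lock loop; stronger than ACQUIRE
	 * semantics strictly need (the smp_mb() after the loop provides
	 * those), presumably kept conservatively.
	 */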
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
#ifdef CONFIG_EZNPS_MTM_EXT
	"	.word %3		\n"
#endif
	"	breq  %0, %2, 1b	\n"
	: "+&r" (val)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
	, "i"(CTOP_INST_SCHD_RW)
#endif
	: "memory");
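
	/*
	 * ACQUIRE barrier: keeps loads/stores of the critical section from
	 * "bleeding up" past the lock acquisition (leak-in is allowed).
	 */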
	smp_mb();
}
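
/* 1 - lock taken successfully */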
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

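	/*
	 * RELEASE barrier: loads/stores of the critical section must
	 * complete before the lock word is restored to UNLOCKED.
	 */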
	smp_mb();

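	/*
	 * The lock word is released with an atomic exchange (EX) even
	 * though a plain store of UNLOCKED would suffice functionally.
	 */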
	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (val)
	: "r"(&(lock->slock))
	: "memory");

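	/*
	 * Likewise stronger than strictly needed for RELEASE; mirrors the
	 * extra barrier in arch_spin_lock() above.
	 */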
	smp_mb();
}
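
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by reader(s).
 *
 * Without LLSC, the rwlock state in rw->counter is serialized by the
 * rw->lock_mutex spinlock; IRQs are disabled around the update so it
 * cannot be interleaved with an interrupt handler taking the same lock.
 */

/* 1 - lock taken successfully */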
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

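	/*
	 * zero means a writer holds the lock exclusively: deny the reader;
	 * otherwise grant the lock to the first/subsequent reader.
	 */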
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	smp_mb();
	return ret;
}
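
/* 1 - lock taken successfully */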
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

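	/*
	 * Deny the writer if reader(s) hold the lock
	 * (counter < __ARCH_RW_LOCK_UNLOCKED__); grant it only when fully
	 * unlocked. Hence the claim that Linux rwlocks are unfair to
	 * writers: they can be starved indefinitely by readers.
	 */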
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}


#endif	/* CONFIG_ARC_HAS_LLSC */

#define arch_read_can_lock(x)	((x)->counter > 0)
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* __ASM_SPINLOCK_H */