/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

#define ATOMIC_INIT(i)	{ (i) }
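
/*
 * Example (illustrative only, not part of this header): ATOMIC_INIT()
 * is the static initializer; the variable name example_count below is
 * hypothetical.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 */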

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	((v)->counter = (i))
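
/*
 * The op templates below pick one of three implementations at build
 * time: a branch-likely ll/sc loop for CPUs that need the R10000
 * workaround, a plain ll/sc retry loop when the CPU supports ll/sc,
 * and an interrupt-disabling fallback for everything else.
 */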
#define ATOMIC_OP(op, c_op, asm_op)					\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
		int temp;						\
									\
		__asm__ __volatile__(					\
		"	.set	arch=r4000			\n"	\
		"1:	ll	%0, %1		# atomic_" #op "\n"	\
		"	" #asm_op " %0, %2			\n"	\
		"	sc	%0, %1				\n"	\
		"	beqzl	%0, 1b				\n"	\
		"	.set	mips0				\n"	\
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
		: "Ir" (i));						\
	} else if (kernel_uses_llsc) {					\
		int temp;						\
									\
		do {							\
			__asm__ __volatile__(				\
			"	.set	"MIPS_ISA_LEVEL"	\n"	\
			"	ll	%0, %1	# atomic_" #op "\n"	\
			"	" #asm_op " %0, %2		\n"	\
			"	sc	%0, %1			\n"	\
			"	.set	mips0			\n"	\
			: "=&r" (temp),					\
			  "+" GCC_OFF_SMALL_ASM() (v->counter)		\
			: "Ir" (i));					\
		} while (unlikely(!temp));				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static __inline__ int atomic_##op##_return(int i, atomic_t * v)	\
{									\
	int result;							\
									\
	smp_mb__before_llsc();						\
									\
	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
		int temp;						\
									\
		__asm__ __volatile__(					\
		"	.set	arch=r4000			\n"	\
		"1:	ll	%1, %2	# atomic_" #op "_return	\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	sc	%0, %2				\n"	\
		"	beqzl	%0, 1b				\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	.set	mips0				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i));						\
	} else if (kernel_uses_llsc) {					\
		int temp;						\
									\
		do {							\
			__asm__ __volatile__(				\
			"	.set	"MIPS_ISA_LEVEL"	\n"	\
			"	ll	%1, %2	# atomic_" #op "_return	\n" \
			"	" #asm_op " %0, %1, %3		\n"	\
			"	sc	%0, %2			\n"	\
			"	.set	mips0			\n"	\
			: "=&r" (result), "=&r" (temp),			\
			  "+" GCC_OFF_SMALL_ASM() (v->counter)		\
			: "Ir" (i));					\
		} while (unlikely(!result));				\
									\
		result = temp; result c_op i;				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	smp_llsc_mb();							\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

ATOMIC_OP(and, &=, and)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
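
/*
 * The expansions above define atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(), atomic_or()
 * and atomic_xor().  Illustrative use (example_count is hypothetical,
 * not part of this header):
 *
 *	atomic_add(3, &example_count);
 *	if (atomic_sub_return(3, &example_count) == 0)
 *		example_zero_path();	hypothetical zero-count handler
 */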

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	sc	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqzl	%0, 1b				\n"
		"	 subu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	sc	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqz	%0, 1b				\n"
		"	 subu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
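
/*
 * Illustrative use of atomic_sub_if_positive() (example_sem is a
 * hypothetical counting-resource variable): take one unit only if at
 * least one is available; a non-negative return means success.
 *
 *	if (atomic_sub_if_positive(1, &example_sem) >= 0)
 *		example_acquired();	hypothetical success path
 */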

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
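
/*
 * atomic_cmpxchg() and atomic_xchg() map straight onto cmpxchg() and
 * xchg() on the counter word; __atomic_add_unless() below shows the
 * canonical cmpxchg() retry loop built on top of them.
 */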

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
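
/*
 * Illustrative use of __atomic_add_unless() (example_refs is a
 * hypothetical reference count): take a reference only while the count
 * is non-zero, the classic "get unless already dead" pattern.
 *
 *	if (__atomic_add_unless(&example_refs, 1, 0) != 0)
 *		example_got_ref();	hypothetical success path
 */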

#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1, (v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1, (v))

/*
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v) ACCESS_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i) ((v)->counter = (i))

#define ATOMIC64_OP(op, c_op, asm_op)					\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
		long temp;						\
									\
		__asm__ __volatile__(					\
		"	.set	arch=r4000			\n"	\
		"1:	lld	%0, %1	# atomic64_" #op "	\n"	\
		"	" #asm_op " %0, %2			\n"	\
		"	scd	%0, %1				\n"	\
		"	beqzl	%0, 1b				\n"	\
		"	.set	mips0				\n"	\
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
		: "Ir" (i));						\
	} else if (kernel_uses_llsc) {					\
		long temp;						\
									\
		do {							\
			__asm__ __volatile__(				\
			"	.set	"MIPS_ISA_LEVEL"	\n"	\
			"	lld	%0, %1	# atomic64_" #op "\n"	\
			"	" #asm_op " %0, %2		\n"	\
			"	scd	%0, %1			\n"	\
			"	.set	mips0			\n"	\
			: "=&r" (temp),					\
			  "+" GCC_OFF_SMALL_ASM() (v->counter)		\
			: "Ir" (i));					\
		} while (unlikely(!temp));				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				\
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
{									\
	long result;							\
									\
	smp_mb__before_llsc();						\
									\
	if (kernel_uses_llsc && R10000_LLSC_WAR) {			\
		long temp;						\
									\
		__asm__ __volatile__(					\
		"	.set	arch=r4000			\n"	\
		"1:	lld	%1, %2	# atomic64_" #op "_return\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	scd	%0, %2				\n"	\
		"	beqzl	%0, 1b				\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	.set	mips0				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i));						\
	} else if (kernel_uses_llsc) {					\
		long temp;						\
									\
		do {							\
			__asm__ __volatile__(				\
			"	.set	"MIPS_ISA_LEVEL"	\n"	\
			"	lld	%1, %2	# atomic64_" #op "_return\n" \
			"	" #asm_op " %0, %1, %3		\n"	\
			"	scd	%0, %2			\n"	\
			"	.set	mips0			\n"	\
			: "=&r" (result), "=&r" (temp),			\
			  "+" GCC_OFF_SMALL_ASM() (v->counter)		\
			: "Ir" (i));					\
		} while (unlikely(!result));				\
									\
		result = temp; result c_op i;				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	smp_llsc_mb();							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, c_op, asm_op)					\
	ATOMIC64_OP(op, c_op, asm_op)					\
	ATOMIC64_OP_RETURN(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
ATOMIC64_OP(and, &=, and)
ATOMIC64_OP(or, |=, or)
ATOMIC64_OP(xor, ^=, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 *			      variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000			\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	scd	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqzl	%0, 1b				\n"
		"	 dsubu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	bltz	%0, 1f				\n"
		"	scd	%0, %2				\n"
		"	.set	noreorder			\n"
		"	beqz	%0, 1b				\n"
		"	 dsubu	%0, %1, %3			\n"
		"	.set	reorder				\n"
		"1:						\n"
		"	.set	mips0				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns true if the add occurred, false otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
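
/*
 * Illustrative use of atomic64_inc_not_zero() (example_refs64 is a
 * hypothetical 64-bit reference count): the same "get unless already
 * dead" pattern as the 32-bit helper above.
 *
 *	if (atomic64_inc_not_zero(&example_refs64))
 *		example_got_ref();	hypothetical success path
 */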

#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1, (v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1, (v))

/*
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */