#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Since *_return_relaxed and {cmp,}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as the acquire
 * barrier for the other variants defined here.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory");	\
	op##_relaxed(args);						\
})
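
/*
 * Illustrative note: the generic atomic layer combines these wrappers
 * with the _relaxed primitives defined below, so an acquire variant can
 * be spelled roughly as
 *
 *	#define atomic_add_return_acquire(...) \
 *		__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 *
 * PPC_ACQUIRE_BARRIER and PPC_RELEASE_BARRIER come from asm/barrier.h
 * and typically expand to isync and lwsync respectively, depending on
 * the CPU configuration.
 */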

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
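
/*
 * atomic_read()/atomic_set() use inline asm rather than a plain C
 * access so the compiler must emit exactly one aligned load/store (no
 * tearing, no refetching). The %U/%X operand modifiers let the compiler
 * use the update/indexed addressing forms where they are legal. No
 * memory ordering is implied; these are relaxed accesses.
 */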

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0,%3)						\
"	stwcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
	PPC405_ERR77(0, %3)						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
	PPC405_ERR77(0, %4)						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}
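
/*
 * Three templates stamp out each operation: ATOMIC_OP() generates a
 * void atomic_<op>(), ATOMIC_OP_RETURN_RELAXED() a variant returning
 * the new value, and ATOMIC_FETCH_OP_RELAXED() a variant returning the
 * value the counter held before the operation.
 */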

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
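
/*
 * Note the operand order for subtraction: "subf rt,ra,rb" computes
 * rb - ra, so "subf %0,%2,%0" yields counter - a. The two instantiations
 * above generate atomic_add(), atomic_add_return_relaxed(),
 * atomic_fetch_add_relaxed() and the corresponding sub versions.
 */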

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc
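
/*
 * "xer" appears in the clobber list because addic (unlike addi) writes
 * the carry bit, XER[CA]. The define-to-self tells the generic fallback
 * layer that the architecture provides its own atomic_inc() rather than
 * synthesizing it from atomic_add().
 */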

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
	PPC405_ERR77(0, %2)
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * Don't override the generic atomic_try_cmpxchg_acquire: this variant
 * adds an "exclusive access" hint (EH=1) to the lwarx, which is what
 * the lock primitives want but is not necessarily desirable for the
 * ordinary _acquire operations.
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_lock	\n"
"	cmpw	0,%0,%3							\n"
"	bne-	2f							\n"
"	stwcx.	%4,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"2:									\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
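
/*
 * Sketch of intended use (illustrative, based on how lock slow paths
 * typically drive try_cmpxchg-style primitives):
 *
 *	int old = 0;
 *	if (atomic_try_cmpxchg_lock(&lock->val, &old, _Q_LOCKED_VAL))
 *		return;		// acquired uncontended
 *	// on failure, "old" now holds the value actually observed
 *
 * _Q_LOCKED_VAL is the qspinlock convention; any nonzero "locked"
 * value works the same way here.
 */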

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
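
/*
 * Implementation detail worth noting: on the success path the loop
 * leaves old + a in %0, and the trailing "subf %0,%2,%0" recovers the
 * old value for the return. The "beq 2f" path skips the subf because
 * there %0 still holds the unmodified old value.
 */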

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n\
	cmpwi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
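
/*
 * The "=&b" constraint on the result above is deliberate: it excludes
 * r0, because "addi rD,rA,SI" treats rA == r0 as the literal value 0,
 * which would corrupt the decrement. The "b" class is exactly the GPRs
 * other than r0.
 */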

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ long atomic64_read(const atomic64_t *v)
{
	long t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, long i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long a, atomic64_t *v)		\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3 \n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}
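
/*
 * Note that the 64-bit templates carry no PPC405_ERR77() workaround:
 * that erratum is specific to the 32-bit PPC405 core, which can never
 * execute this __powerpc64__ code.
 */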

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline long							\
atomic64_##op##_return_relaxed(long a, atomic64_t *v)			\
{									\
	long t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline long							\
atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)			\
{									\
	long res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n\
	addic	%0,%0,1\n\
	stdcx.	%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ long atomic64_inc_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n\
	addic	%0,%0,-1\n\
	stdcx.	%0,0,%2\n\
	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ long atomic64_dec_return_relaxed(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n\
	stdcx.	%0,0,%1\n\
	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
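
/*
 * Unlike the 32-bit version, this uses the record form "addic." so the
 * decrement and the sign test take a single instruction (CR0 is set
 * from the result, so no separate cmpdi is needed), and a plain "=&r"
 * constraint suffices because addic., unlike addi, has no special-case
 * r0 operand.
 */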

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ long atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
	long t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n\
	cmpd	0,%0,%3 \n\
	beq	2f \n\
	add	%0,%2,%0 \n"
"	stdcx.	%0,0,%1 \n\
	bne-	1b \n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	long t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n\
	cmpdi	0,%0,0\n\
	beq-	2f\n\
	addic	%1,%0,1\n\
	stdcx.	%1,0,%2\n\
	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
	"\n\
2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
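
/*
 * The 64-bit variant normalizes its result to 0/1 with "t1 != 0",
 * presumably because the old long value could truncate to zero when
 * narrowed to the int return type; the 32-bit atomic_inc_not_zero()
 * can return the old value directly. Both are used as booleans.
 */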

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */