#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * The *_return_relaxed and {cmp,}xchg_relaxed operations all end with a
 * "bne-" instruction, so an isync is enough to act as an acquire barrier
 * for the acquire variants built on top of them.
 */
#define __atomic_acquire_fence() \
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence() \
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}
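
/*
 * Illustrative only: declaring, initialising and accessing an atomic_t
 * (hypothetical variable name):
 *
 *	static atomic_t hyp_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&hyp_count, 5);
 *	pr_info("%d\n", atomic_read(&hyp_count));	// prints 5
 *
 * atomic_read() and atomic_set() are single lwz/stw instructions: atomic,
 * but with no ordering guarantees of their own.
 */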

#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \
{ \
	int t; \
\
	__asm__ __volatile__( \
"1:	lwarx	%0,0,%3		# atomic_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
"	stwcx.	%0,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
	int t; \
\
	__asm__ __volatile__( \
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
"	stwcx.	%0,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return t; \
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
	int res, t; \
\
	__asm__ __volatile__( \
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
"	stwcx.	%1,0,%4\n" \
"	bne-	1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return res; \
}

#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
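
/*
 * For orientation only: ATOMIC_OPS(add, add) above expands to roughly
 *
 *	static __inline__ void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * each implemented as an lwarx/stwcx. loop that retries until the store
 * conditional succeeds.  The ordered, acquire and release forms of the
 * _return/_fetch operations are generated by the generic atomic headers
 * from these _relaxed versions together with the fence macros above.
 */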

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
	ATOMIC_OP(op, asm_op) \
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
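
/*
 * Caller-side sketch (not defined by this header): a bounded-increment
 * loop built on atomic_cmpxchg(), with a hypothetical "ceiling" limit:
 *
 *	static bool hyp_inc_below(atomic_t *v, int ceiling)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < ceiling) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *
 *			if (seen == old)
 *				return true;	// the update won the race
 *			old = seen;		// someone else changed v; retry
 *		}
 *		return false;
 *	}
 *
 * atomic_cmpxchg() is the fully ordered form; the _relaxed and _acquire
 * variants above map onto the corresponding cmpxchg flavours.
 */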

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n"
"	cmpw	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
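
/*
 * Caller-side sketch: the classic "take a reference unless the object is
 * already dead" pattern (hypothetical helper name):
 *
 *	static bool hyp_get_ref(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 *
 * The generic atomic_add_unless() wrapper is built on this primitive;
 * powerpc additionally provides its own atomic_inc_not_zero() below.
 */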

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n"
"	cmpwi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n"
"	cmpwi	%0,1\n"
"	addi	%0,%0,-1\n"
"	blt-	2f\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
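
/*
 * Caller-side sketch (hypothetical): a semaphore-style "try down" that
 * only succeeds while the counter is positive:
 *
 *	static bool hyp_try_down(atomic_t *count)
 *	{
 *		return atomic_dec_if_positive(count) >= 0;
 *	}
 *
 * A return value of -1 means the counter was already 0 and was left
 * unchanged; the operation itself never stores a negative value.
 */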

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
{ \
	s64 t; \
\
	__asm__ __volatile__( \
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n" \
	#asm_op " %0,%2,%0\n" \
"	stdcx.	%0,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
	s64 t; \
\
	__asm__ __volatile__( \
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n" \
	#asm_op " %0,%2,%0\n" \
"	stdcx.	%0,0,%3\n" \
"	bne-	1b\n" \
	: "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return t; \
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
	s64 res, t; \
\
	__asm__ __volatile__( \
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n" \
	#asm_op " %1,%3,%0\n" \
"	stdcx.	%1,0,%4\n" \
"	bne-	1b\n" \
	: "=&r" (res), "=&r" (t), "+m" (v->counter) \
	: "r" (a), "r" (&v->counter) \
	: "cc"); \
\
	return res; \
}

#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
	ATOMIC64_OP(op, asm_op) \
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n"
"	addic.	%0,%0,-1\n"
"	blt-	2f\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n"
"	cmpd	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n"
"	cmpdi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stdcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
551