/*
 * arch/xtensa/include/asm/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

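/*
 * Usage sketch (illustrative only; "pending" is a hypothetical counter):
 * an atomic_t must be initialized before use, either statically with
 * ATOMIC_INIT() or at run time with atomic_set().
 *
 *      static atomic_t pending = ATOMIC_INIT(0);
 */
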
/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, LOCKLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)          (*(volatile int *)&(v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)         ((v)->counter = (i))

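/*
 * Illustrative only: atomic_read() and atomic_set() are plain accesses
 * in this implementation; they imply no memory barriers. A hypothetical
 * reset-and-check sequence:
 *
 *      atomic_set(&pending, 0);
 *      if (atomic_read(&pending) == 0)
 *              ...;
 */
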
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       add     %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (i), "a" (v)
                        : "memory"
                        );
#else
        unsigned int vval;

        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       add     %0, %0, %1\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval)
                        : "a" (i), "a" (v)
                        : "a15", "memory"
                        );
#endif
}

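/*
 * Illustrative only: at the C level, the S32C1I path of atomic_add()
 * above behaves like the compare-and-swap retry loop below, sketched
 * here with the GCC __sync_val_compare_and_swap() builtin (which is
 * not what the kernel actually uses). atomic_sub() is analogous with
 * subtraction.
 *
 *      static void atomic_add_model(int i, atomic_t *v)
 *      {
 *              int old, new;
 *
 *              do {
 *                      old = v->counter;       (l32i; wsr scompare1)
 *                      new = old + i;          (add)
 *              } while (__sync_val_compare_and_swap(&v->counter,
 *                                                   old, new) != old);
 *      }
 */
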
/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       sub     %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (i), "a" (v)
                        : "memory"
                        );
#else
        unsigned int vval;

        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       sub     %0, %0, %1\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval)
                        : "a" (i), "a" (v)
                        : "a15", "memory"
                        );
#endif
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       add     %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        "       add     %0, %0, %2\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (i), "a" (v)
                        : "memory"
                        );

        return result;
#else
        unsigned int vval;

        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       add     %0, %0, %1\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval)
                        : "a" (i), "a" (v)
                        : "a15", "memory"
                        );

        return vval;
#endif
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       sub     %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        "       sub     %0, %0, %2\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (i), "a" (v)
                        : "memory"
                        );

        return result;
#else
        unsigned int vval;

        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       sub     %0, %0, %1\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval)
                        : "a" (i), "a" (v)
                        : "a15", "memory"
                        );

        return vval;
#endif
}

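/*
 * Illustrative only: the _return variants yield the *new* value, so a
 * post-condition can be tested without a second read racing against
 * other CPUs. Hypothetical example (budget and refill() are not part
 * of this header):
 *
 *      if (atomic_sub_return(n, &budget) < 0)
 *              refill(&budget);
 */
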
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

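/*
 * Usage sketch (illustrative; struct foo and foo_release() are
 * hypothetical): the classic reference-count pattern built from the
 * macros above.
 *
 *      static void foo_put(struct foo *p)
 *      {
 *              if (atomic_dec_and_test(&p->refcnt))
 *                      foo_release(p);
 *      }
 */
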
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

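/*
 * Illustrative only: atomic_cmpxchg() returns the value observed before
 * the attempted store, so success is detected by comparing it with the
 * expected old value. A hypothetical bounded increment (count and LIMIT
 * are not part of this header):
 *
 *      int old = atomic_read(&count);
 *      while (old < LIMIT) {
 *              int seen = atomic_cmpxchg(&count, old, old + 1);
 *              if (seen == old)
 *                      break;          (we won the race)
 *              old = seen;
 *      }
 */
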
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
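/*
 * Illustrative only: __atomic_add_unless() is the building block for
 * atomic_add_unless()/atomic_inc_not_zero() in <linux/atomic.h>. A
 * hypothetical "take a reference only if the object is still live"
 * (p is not part of this header):
 *
 *      if (__atomic_add_unless(&p->refcnt, 1, 0) == 0)
 *              return NULL;    (counter was 0: object already dead)
 */
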
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       and     %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (~mask), "a" (v)
                        : "memory"
                        );
#else
        unsigned int all_f = -1;
        unsigned int vval;

        /* The xor with all_f (all ones) computes ~mask in place. */
        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       xor     %1, %4, %3\n"
                        "       and     %0, %0, %4\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval), "=a" (mask)
                        : "a" (v), "a" (all_f), "1" (mask)
                        : "a15", "memory"
                        );
#endif
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
        unsigned long tmp;
        int result;

        __asm__ __volatile__(
                        "1:     l32i    %1, %3, 0\n"
                        "       wsr     %1, scompare1\n"
                        "       or      %0, %1, %2\n"
                        "       s32c1i  %0, %3, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (mask), "a" (v)
                        : "memory"
                        );
#else
        unsigned int vval;

        __asm__ __volatile__(
                        "       rsil    a15, "__stringify(LOCKLEVEL)"\n"
                        "       l32i    %0, %2, 0\n"
                        "       or      %0, %0, %1\n"
                        "       s32i    %0, %2, 0\n"
                        "       wsr     a15, ps\n"
                        "       rsync\n"
                        : "=&a" (vval)
                        : "a" (mask), "a" (v)
                        : "a15", "memory"
                        );
#endif
}

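/*
 * Usage sketch (illustrative; IRQ_PENDING and flags are hypothetical):
 * atomic_set_mask()/atomic_clear_mask() give atomic bitwise OR and
 * AND-NOT, e.g. for flag words shared with interrupt handlers.
 *
 *      atomic_set_mask(IRQ_PENDING, &flags);    (flags |= IRQ_PENDING)
 *      atomic_clear_mask(IRQ_PENDING, &flags);  (flags &= ~IRQ_PENDING)
 */
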
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */