linux/arch/xtensa/include/asm/atomic.h
/*
 * arch/xtensa/include/asm/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level 1.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, 1
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because register allocation is done
 * by the compiler and a window overflow must not occur between the
 * rsil and wsr instructions.  By using a15 in the rsil, the machine
 * is guaranteed to be in a state where no register reference will
 * cause an overflow.
 */
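
/*
 * In C terms, every operation below is a sketch of the same pattern
 * (lock_interrupts()/unlock_interrupts() are illustrative names only,
 * not real kernel helpers):
 *
 *	flags = lock_interrupts();	// rsil a15, LOCKLEVEL
 *	// load, modify, store v->counter
 *	unlock_interrupts(flags);	// wsr a15, PS; rsync
 */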

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		((v)->counter = (i))

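/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 * declare, initialize, and inspect a counter.  nr_events and
 * handle_events() are illustrative names.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_events, 5);
 *	if (atomic_read(&nr_events) == 5)
 *		handle_events();
 */
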
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"add     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0              \n\t"
		"sub     %0, %0, %1             \n\t"
		"s32i    %0, %2, 0              \n\t"
		"wsr     a15, "__stringify(PS)"       \n\t"
		"rsync                          \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);
}

/*
 * We use atomic_{add|sub}_return to define other functions.
 */

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"add     %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);

	return vval;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"sub     %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (i), "a" (v)
		: "a15", "memory"
		);

	return vval;
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return result
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

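/*
 * Classic put-side refcount pattern (sketch; obj, refcnt, and
 * free_obj() are hypothetical names):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */
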
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

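/*
 * Typical atomic_cmpxchg retry loop (sketch; the doubling transform
 * is purely illustrative): reread and retry until the update from
 * the observed old value succeeds.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */
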
/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

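/*
 * Usage sketch (hypothetical): take a reference only while the object
 * is still live, i.e. its count has not already dropped to zero.
 * obj and refcnt are illustrative names.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */
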
/**
 * atomic_clear_mask - atomically clear bits in atomic variable
 * @mask: bits to clear
 * @v: pointer of type atomic_t
 *
 * Atomically performs v->counter &= ~mask.  The xor with all ones
 * inverts @mask inside the critical section.
 */
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"xor     %1, %4, %3            \n\t"
		"and     %0, %0, %4            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval), "=a" (mask)
		: "a" (v), "a" (all_f), "1" (mask)
		: "a15", "memory"
		);
}

/**
 * atomic_set_mask - atomically set bits in atomic variable
 * @mask: bits to set
 * @v: pointer of type atomic_t
 *
 * Atomically performs v->counter |= mask.
 */
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned int vval;

	__asm__ __volatile__(
		"rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
		"l32i    %0, %2, 0             \n\t"
		"or      %0, %0, %1            \n\t"
		"s32i    %0, %2, 0             \n\t"
		"wsr     a15, "__stringify(PS)"      \n\t"
		"rsync                         \n"
		: "=&a" (vval)
		: "a" (mask), "a" (v)
		: "a15", "memory"
		);
}
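
/*
 * Mask-op usage sketch (hypothetical driver-style caller; FLAG_BUSY,
 * dev->flags, and do_io() are illustrative names):
 *
 *	atomic_set_mask(FLAG_BUSY, &dev->flags);
 *	do_io(dev);
 *	atomic_clear_mask(FLAG_BUSY, &dev->flags);
 */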

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */