linux/arch/tile/include/asm/atomic_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific routines to support <linux/atomic.h>. */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_xchg(v, n);
}
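
/*
 * Illustrative use (not part of the original header): atomic_xchg() is a
 * natural fit for atomically taking ownership of a word, e.g. stealing
 * all pending flag bits at once.  The "work" structure is hypothetical.
 */
#if 0
        int pending = atomic_xchg(&work->pending_flags, 0);
        /* The old bits are now exclusively ours; the word reads 0. */
#endif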

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_cmpxchg(v, o, n);
}
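
/*
 * Illustrative sketch (not part of the original header): the classic
 * compare-and-swap retry loop built on atomic_cmpxchg().  The function
 * and its "max" bound are hypothetical.
 */
#if 0
static inline void example_saturating_inc(atomic_t *v, int max)
{
        int old = atomic_read(v);

        for (;;) {
                int prev;

                if (old == max)
                        return;         /* already saturated */
                prev = atomic_cmpxchg(v, old, old + 1);
                if (prev == old)
                        return;         /* our swap won */
                old = prev;             /* lost a race; retry from new value */
        }
}
#endif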

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        _atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_xchg_add(v, i) + i;
}
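
/*
 * Illustrative sketch (not part of the original header): because
 * atomic_add_return() hands back the post-add value, callers can act on
 * the result without a second read.  The names here are hypothetical.
 */
#if 0
static inline void example_put(atomic_t *refcount)
{
        /* Only the caller that drops the final reference frees. */
        if (atomic_add_return(-1, refcount) == 0)
                example_free_object();
}
#endif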

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic_xchg_add_unless(v, a, u);
}
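
/*
 * Illustrative sketch (not part of the original header): the generic
 * kernel wrappers turn this return-the-old-value primitive into a
 * success/failure test, roughly as follows.
 */
#if 0
static inline int example_inc_not_zero(atomic_t *v)
{
        /* The old value was nonzero iff the increment took effect. */
        return __atomic_add_unless(v, 1, 0) != 0;
}
#endif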

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since the store would be lost
 * if it fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
        _atomic_xchg(v, n);
}

/* A 64-bit atomic type */

typedef struct {
        u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
        return _atomic64_xchg_add((atomic64_t *)v, 0);
}
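
/*
 * Illustrative detail (not part of the original header): a plain 64-bit
 * load on this 32-bit ISA compiles to two separate word loads, so a
 * concurrent atomic64_set() could land between them and the caller
 * would see a mix of old and new halves:
 *
 *      lo = ((volatile u32 *)&v->counter)[0];
 *              <-- atomic64_set(v, n) runs here -->
 *      hi = ((volatile u32 *)&v->counter)[1];
 *      return ((u64)hi << 32) | lo;            <-- torn value
 *
 * Routing the read through the same lock-protected routine as the
 * writers rules that interleaving out.
 */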

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns the old value of @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
        _atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(v, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since the store would be lost
 * if it fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
        _atomic64_xchg(v, n);
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)       atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)              atomic64_add(-(i), (v))
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

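/*
 * Illustrative sketch (not part of the original header): the derived
 * forms above compose in the obvious way.  The names are hypothetical.
 */
#if 0
static atomic64_t example_bytes_pending = ATOMIC64_INIT(0);

static void example_complete(u64 len)
{
        /* True only for the caller that retires the last pending bytes. */
        if (atomic64_sub_and_test(len, &example_bytes_pending))
                example_wake_waiters();
}
#endif
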
/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns (test-and-set) the lock and then do a
 * read/modify/write of the word.  But after the word is updated, the
 * routine issues an "mf" (memory fence) before returning, and since
 * it's a function call, we don't even need a compiler barrier.
 */
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_dec()      do { } while (0)
#define smp_mb__after_atomic_inc()      do { } while (0)

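/*
 * Illustrative pairing (not part of the original header): how callers
 * use these macros with the non-value-returning ops.  "v" is hypothetical.
 */
#if 0
        /* Order all prior stores before the decrement... */
        smp_mb__before_atomic_dec();
        atomic_dec(&v->pending);
        /* ...but nothing is needed after: it expands to a no-op here. */
        smp_mb__after_atomic_dec();
#endif
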
#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
  (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

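/*
 * Worked example (not part of the original header), assuming the common
 * tile configuration of 64KB pages, i.e. PAGE_SHIFT == 16:
 *
 *      ATOMIC_HASH_SHIFT = 16 - 3 = 13
 *      ATOMIC_HASH_SIZE  = 1 << 13 = 8192 locks
 *
 * At 4 bytes per lock the table occupies 32KB, i.e. PAGE_SIZE / 2,
 * within the single-page bound described above.
 */
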
#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm_32.S" to
 * keep assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Private helper routines in lib/atomic_asm_32.S */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
                                          int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
                                                  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
                                      int *lock, u64 o, u64 n);

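/*
 * Illustrative sketch (not part of the original header): how the public
 * _atomic_xxx() wrappers plausibly choose the "lock" argument for these
 * helpers when the locks live in atomic_locks[].  A hash of the word's
 * address picks an entry, so distinct words contend only on collisions.
 * The helper name is hypothetical; see arch/tile/lib/atomic_32.c for
 * the real implementation.
 */
#if 0
static inline int *example_hashed_lock(volatile void *v)
{
        /* Atomics are at least word-aligned; hash the address bits
         * above the low 3 into the lock-table index. */
        unsigned long i = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);

        return &atomic_locks[i];
}
#endif
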
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */