linux/lib/atomic64.c
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS        16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
        raw_spinlock_t lock;
        char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
        [0 ... (NR_LOCKS - 1)] = {
                .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
        },
};

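/*
 * Map an atomic64_t address to one of the spinlocks above.  The address
 * is hashed at cacheline granularity, so variables sharing a cacheline
 * share a lock while nearby variables spread across different locks.
 */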
static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
        unsigned long addr = (unsigned long) v;

        addr >>= L1_CACHE_SHIFT;
        addr ^= (addr >> 8) ^ (addr >> 16);
        return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

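/**
 * atomic64_read - read a 64-bit atomic variable
 * @v: pointer to the atomic64_t to read
 *
 * Returns the current value of @v, loaded under the hashed spinlock so
 * the 64-bit read cannot be torn.
 */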
long long atomic64_read(const atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_read);

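/**
 * atomic64_set - set a 64-bit atomic variable
 * @v: pointer to the atomic64_t to set
 * @i: value to store
 *
 * Atomically stores @i into @v under the hashed spinlock.
 */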
void atomic64_set(atomic64_t *v, long long i)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);

        raw_spin_lock_irqsave(lock, flags);
        v->counter = i;
        raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

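/**
 * atomic64_add - add to a 64-bit atomic variable
 * @a: value to add
 * @v: pointer to the atomic64_t
 *
 * Atomically adds @a to @v; no value is returned.
 */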
void atomic64_add(long long a, atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);

        raw_spin_lock_irqsave(lock, flags);
        v->counter += a;
        raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

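/**
 * atomic64_add_return - add to a 64-bit atomic variable and return the result
 * @a: value to add
 * @v: pointer to the atomic64_t
 *
 * Atomically adds @a to @v and returns the new value of @v.
 */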
long long atomic64_add_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter += a;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_add_return);

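/**
 * atomic64_sub - subtract from a 64-bit atomic variable
 * @a: value to subtract
 * @v: pointer to the atomic64_t
 *
 * Atomically subtracts @a from @v; no value is returned.
 */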
void atomic64_sub(long long a, atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);

        raw_spin_lock_irqsave(lock, flags);
        v->counter -= a;
        raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

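/**
 * atomic64_sub_return - subtract from a 64-bit atomic variable and return the result
 * @a: value to subtract
 * @v: pointer to the atomic64_t
 *
 * Atomically subtracts @a from @v and returns the new value of @v.
 */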
long long atomic64_sub_return(long long a, atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter -= a;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

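/**
 * atomic64_dec_if_positive - decrement unless the result would be negative
 * @v: pointer to the atomic64_t
 *
 * Atomically decrements @v only when the result is not negative.  Returns
 * the old value minus one in either case, so a negative return value means
 * the decrement was not performed.
 */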
long long atomic64_dec_if_positive(atomic64_t *v)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter - 1;
        if (val >= 0)
                v->counter = val;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

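/**
 * atomic64_cmpxchg - compare and exchange a 64-bit atomic variable
 * @v: pointer to the atomic64_t
 * @o: expected old value
 * @n: new value
 *
 * Atomically sets @v to @n if its current value equals @o.  Returns the
 * value of @v before the operation; the exchange succeeded if and only if
 * the return value equals @o.
 */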
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        if (val == o)
                v->counter = n;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

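/**
 * atomic64_xchg - exchange a 64-bit atomic variable
 * @v: pointer to the atomic64_t
 * @new: value to store
 *
 * Atomically stores @new into @v and returns the previous value.
 */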
long long atomic64_xchg(atomic64_t *v, long long new)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        long long val;

        raw_spin_lock_irqsave(lock, flags);
        val = v->counter;
        v->counter = new;
        raw_spin_unlock_irqrestore(lock, flags);
        return val;
}
EXPORT_SYMBOL(atomic64_xchg);

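/**
 * atomic64_add_unless - add to a 64-bit atomic variable unless it has a given value
 * @v: pointer to the atomic64_t
 * @a: value to add
 * @u: value @v must not equal for the addition to happen
 *
 * Atomically adds @a to @v if @v is not @u.  Returns 1 if the addition
 * was performed, 0 otherwise.
 */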
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        unsigned long flags;
        raw_spinlock_t *lock = lock_addr(v);
        int ret = 0;

        raw_spin_lock_irqsave(lock, flags);
        if (v->counter != u) {
                v->counter += a;
                ret = 1;
        }
        raw_spin_unlock_irqrestore(lock, flags);
        return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
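
/*
 * Illustrative usage (the names below are hypothetical and not part of
 * this file): on a 32-bit architecture without native 64-bit atomics,
 * callers simply use the regular atomic64 API and end up in the
 * lock-based implementations above, e.g.
 *
 *	static atomic64_t bytes_done = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_done);
 *	if (atomic64_read(&bytes_done) > limit)
 *		throttle();
 */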