linux/lib/lockref.c
#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {                                        \
        struct lockref old;                                                     \
        BUILD_BUG_ON(sizeof(old) != 8);                                         \
        old.lock_count = READ_ONCE(lockref->lock_count);                        \
        while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {     \
                struct lockref new = old, prev = old;                           \
                CODE                                                            \
                old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,        \
                                                   old.lock_count,              \
                                                   new.lock_count);             \
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
                arch_mutex_cpu_relax();                                         \
        }                                                                       \
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
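
/*
 * Illustration (editorial sketch, not compiled): with CONFIG_CMPXCHG_LOCKREF,
 * a caller such as lockref_get() effectively gets a lockless fast path along
 * the lines of:
 *
 *	old = READ_ONCE(lockref->lock_count);
 *	while (the spinlock half of "old" reads as unlocked) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64_relaxed(&lockref->lock_count,
 *				      old, new) == old)
 *			return;		// lockless update succeeded
 *		// cmpxchg failed: "old" now holds the fresh value, retry
 *	}
 *	// otherwise fall back to taking lockref->lock
 *
 * The 64-bit cmpxchg only updates the count while the embedded spinlock is
 * observed unlocked, so lockless updates never race with a lock holder.
 */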

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
        ,
                return;
        );

        spin_lock(&lockref->lock);
        lockref->count++;
        spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
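
/*
 * Example (illustrative only): lockref_get() is the right call when the
 * caller already owns a reference and merely wants another one, e.g. when
 * storing a pointer into a second long-lived structure ("obj" and its
 * embedded "ref" member are hypothetical):
 *
 *	copy->obj = orig->obj;
 *	lockref_get(&copy->obj->ref);	// count is known to be >= 1 here
 */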

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if (lockref->count) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
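
/*
 * Example (illustrative only): lockref_get_not_zero() suits lockless
 * lookups, e.g. finding an object under rcu_read_lock() and only keeping
 * it if its count has not already dropped to zero ("my_lookup", "obj" and
 * its "ref" member are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = my_lookup(key);
 *	if (obj && !lockref_get_not_zero(&obj->ref))
 *		obj = NULL;		// object is already being torn down
 *	rcu_read_unlock();
 */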

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count++;
                if (!old.count)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (!lockref->count)
                return 0;
        lockref->count++;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
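
/*
 * Example (illustrative only): when lockref_get_or_lock() returns 0 the
 * caller holds lockref->lock and can decide what a zero count means, for
 * instance reviving the object under the lock ("obj" is hypothetical):
 *
 *	if (!lockref_get_or_lock(&obj->ref)) {
 *		// count was 0 and the spinlock is now held
 *		obj->ref.count = 1;
 *		spin_unlock(&obj->ref.lock);
 *	}
 */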

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
        CMPXCHG_LOOP(
                new.count--;
                if (old.count <= 1)
                        break;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        if (lockref->count <= 1)
                return 0;
        lockref->count--;
        spin_unlock(&lockref->lock);
        return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
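
/*
 * Example (illustrative only): lockref_put_or_lock() keeps the common
 * "drop one of many references" case lockless, while the final put falls
 * back to the lock so teardown can be serialized. Note that on a 0 return
 * the count has NOT been decremented ("obj" and "my_free" are hypothetical):
 *
 *	if (!lockref_put_or_lock(&obj->ref)) {
 *		// count was <= 1 and the spinlock is now held
 *		obj->ref.count--;
 *		spin_unlock(&obj->ref.lock);
 *		if (!obj->ref.count)
 *			my_free(obj);
 *	}
 */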

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
        int retval;

        CMPXCHG_LOOP(
                new.count++;
                if ((int)old.count < 0)
                        return 0;
        ,
                return 1;
        );

        spin_lock(&lockref->lock);
        retval = 0;
        if ((int) lockref->count >= 0) {
                lockref->count++;
                retval = 1;
        }
        spin_unlock(&lockref->lock);
        return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
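
/*
 * Example (illustrative only): lockref_mark_dead() and
 * lockref_get_not_dead() pair up so that teardown can fence off new
 * lockless references ("obj" is hypothetical):
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);	// count becomes -128
 *	spin_unlock(&obj->ref.lock);
 *
 *	// elsewhere, a lockless lookup now refuses the object:
 *	if (!lockref_get_not_dead(&obj->ref))
 *		obj = NULL;
 */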