linux/include/linux/percpu-refcount.h
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when
 * you start shutting down you drop that initial refcount with
 * percpu_ref_kill() rather than with a final percpu_ref_put().
 *
 * The refcount will have a range of 0 to ((1UL << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in atomic mode, collecting the per cpu refs and issuing
 * the appropriate barriers, marks the ref as shutting down so that
 * percpu_ref_put() will check for the ref hitting 0, and finally drops the
 * initial ref itself.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill() (which also drops the initial ref), then
 * hlist_del_rcu() and synchronize_rcu() to remove the kioctx from the
 * process's list of kioctxs - after that, there can't be any new users of the
 * kioctx (from lookup_ioctx()) and teardown can complete.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() may only be called once, so code with multiple
 * teardown paths needs some mechanism of its own to synchronize around it.
 */
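
/*
 * As a quick orientation, here is a minimal lifecycle sketch.  It is only
 * an illustration: struct my_obj, my_obj_release() and the GFP_KERNEL
 * choice are hypothetical, not part of this header.
 *
 *      struct my_obj {
 *              struct percpu_ref ref;
 *      };
 *
 *      static void my_obj_release(struct percpu_ref *ref)
 *      {
 *              struct my_obj *obj = container_of(ref, struct my_obj, ref);
 *
 *              percpu_ref_exit(&obj->ref);
 *              kfree(obj);
 *      }
 *
 *      setup:          percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
 *      use:            percpu_ref_get(&obj->ref); ... percpu_ref_put(&obj->ref);
 *      teardown:       percpu_ref_kill(&obj->ref);
 *
 * my_obj_release() runs once the last reference is dropped, possibly from
 * within percpu_ref_kill() itself if no other users remain.
 */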

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};

/* @flags for percpu_ref_init() */
enum {
        /*
         * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
         * operation using percpu_ref_switch_to_percpu().  If initialized
         * with this flag, the ref will stay in atomic mode until
         * percpu_ref_switch_to_percpu() is invoked on it.
         */
        PERCPU_REF_INIT_ATOMIC  = 1 << 0,

        /*
         * Start dead w/ ref == 0 in atomic mode.  Must be revived with
         * percpu_ref_reinit() before use.  Implies INIT_ATOMIC.
         */
        PERCPU_REF_INIT_DEAD    = 1 << 1,
};
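
/*
 * For example, a ref that must stay in atomic mode until some later setup
 * step has finished could be initialized as follows (obj and
 * my_obj_release() are again hypothetical):
 *
 *      percpu_ref_init(&obj->ref, my_obj_release, PERCPU_REF_INIT_ATOMIC,
 *                      GFP_KERNEL);
 *      ...
 *      percpu_ref_switch_to_percpu(&obj->ref);
 */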

struct percpu_ref {
        atomic_long_t           count;
        /*
         * The low bits of percpu_count_ptr hold the __PERCPU_REF_* flags;
         * if __PERCPU_REF_ATOMIC is set, get/put manipulate @count instead
         * of the percpu counters.
         */
        unsigned long           percpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
        struct rcu_head         rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release, unsigned int flags,
                                 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non-percpu (atomic) mode, uses call_rcu() to gather up the
 * percpu counters once all existing percpu-mode users are done, and drops
 * the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill_and_confirm(ref, NULL);
}
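
/*
 * A sketch of a two stage shutdown that needs a hard guarantee against new
 * tryget_live users, built on percpu_ref_kill_and_confirm() (struct my_ctx
 * and its confirm_done completion are hypothetical):
 *
 *      static void my_ctx_confirm_kill(struct percpu_ref *ref)
 *      {
 *              struct my_ctx *ctx = container_of(ref, struct my_ctx, ref);
 *
 *              complete(&ctx->confirm_done);
 *      }
 *
 *      percpu_ref_kill_and_confirm(&ctx->ref, my_ctx_confirm_kill);
 *      wait_for_completion(&ctx->confirm_done);
 *
 * Once the wait returns, percpu_ref_tryget_live() is guaranteed to fail;
 * see percpu_ref_tryget_live() below.
 */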

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer for the caller to test for NULL
 * because doing so would force the compiler to generate two conditional
 * branches, as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                          unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /*
         * The value of @ref->percpu_count_ptr is tested for
         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
         * used as a pointer.  If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between, contaminating the pointer value, meaning that
         * READ_ONCE() is required when fetching it.
         */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

        /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
        smp_read_barrier_depends();

        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
         * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
         * implies ATOMIC anyway.  Test them together.
         */
        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_add(*percpu_count, nr);
        else
                atomic_long_add(nr, &ref->count);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
        percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count has already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret = false;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}
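
/*
 * A sketch of a lookup-side user of percpu_ref_tryget_live()
 * (my_obj_lookup(), my_obj_idr and the surrounding code are hypothetical,
 * and the sketch assumes teardown frees objects only after an RCU grace
 * period, so the pointer returned by idr_find() stays valid across the
 * tryget):
 *
 *      struct my_obj *my_obj_lookup(int id)
 *      {
 *              struct my_obj *obj;
 *
 *              rcu_read_lock();
 *              obj = idr_find(&my_obj_idr, id);
 *              if (obj && !percpu_ref_tryget_live(&obj->ref))
 *                      obj = NULL;
 *              rcu_read_unlock();
 *
 *              return obj;
 *      }
 *
 * On success the caller owns a reference and drops it with
 * percpu_ref_put() when done; once the ref has been killed, the lookup
 * starts failing and teardown can proceed.
 */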

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_sub(*percpu_count, nr);
        else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
                ref->release(ref);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if it reaches 0, call the release function
 * (which was passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
        percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
        return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount has reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref has reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;
        return !atomic_long_read(&ref->count);
}
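
/*
 * percpu_ref_is_zero() is typically used to wait for a killed ref to
 * drain, along the lines of the following sketch (obj->drain_wq is
 * hypothetical; the release callback is assumed to do
 * wake_up(&obj->drain_wq), and it must not free @obj, which is still
 * examined by percpu_ref_is_zero() afterwards):
 *
 *      percpu_ref_kill(&obj->ref);
 *      wait_event(obj->drain_wq, percpu_ref_is_zero(&obj->ref));
 */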

#endif