linux/include/linux/percpu-refcount.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref.  After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx().  percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be
 * dropped once.  percpu_ref_kill() must be called precisely once, so the
 * caller has to provide that synchronization itself; the aio code does
 * this, but it's not necessary if the code already has some other
 * mechanism to synchronize teardown.
 */
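
/*
 * Lifecycle sketch (illustrative only; "struct foo" and foo_release() are
 * hypothetical names, not part of this API).  It shows the sequence the
 * comment above describes: init with an implicit initial ref, cheap get/put
 * in hot paths, then kill followed by dropping the initial ref:
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 *	percpu_ref_get(&foo->ref);	(hot path, percpu fast path)
 *	percpu_ref_put(&foo->ref);
 *
 *	percpu_ref_kill(&foo->ref);	(shutdown: switch to atomic mode)
 *	percpu_ref_put(&foo->ref);	(drop the initial ref; foo_release()
 *					 runs once the count hits zero)
 */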

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};
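
/*
 * Sketch (hypothetical, for illustration): a ref that must stay in atomic
 * mode during setup and is switched to percpu operation afterwards.
 * PERCPU_REF_INIT_ATOMIC implies ALLOW_REINIT, so the later switch is
 * permitted:
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	(... setup that needs atomic mode ...)
 *	percpu_ref_switch_to_percpu(&foo->ref);
 */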

struct percpu_ref_data {
	atomic_long_t		count;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
	struct percpu_ref	*ref;
};

struct percpu_ref {
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;

	/*
	 * 'percpu_ref' is often embedded into user structures, and only
	 * 'percpu_count_ptr' is required in the fast path.  The other
	 * fields are moved into 'percpu_ref_data' to reduce the memory
	 * footprint of the fast path.
	 */
	struct percpu_ref_data	*data;
};
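
/*
 * Since the ref is typically embedded, the release callback usually
 * recovers the containing object with container_of().  A minimal sketch
 * ("struct foo" and foo_release() are hypothetical names):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 */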

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
bool percpu_ref_is_zero(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
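
/*
 * Sketch (hypothetical) of the confirmed variant: percpu_ref_kill() is
 * percpu_ref_kill_and_confirm() with no confirmation callback.  A caller
 * that needs to know when percpu_ref_tryget_live() can no longer succeed
 * can pass a confirm_kill callback and wait for it, e.g. via a completion
 * ("struct foo" and its confirm_done member are assumed for illustration):
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 */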

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  Instead of
 * returning the pointer and letting the caller test it for NULL, the
 * function returns a bool, because returning the pointer would force the
 * compiler to generate two conditional branches as it can't assume that
 * @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					  unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value.  This means that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The dependency ordering from the READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->data->count);

	rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->data->count);
	}

	rcu_read_unlock();

	return ret;
}
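
/*
 * Sketch (hypothetical) of the common lookup pattern: find an object in an
 * RCU protected structure and only use it if a live reference can still be
 * taken ("foo_idr" and the surrounding code are assumptions, not part of
 * this API):
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 *
 * On success the caller holds a reference which must eventually be dropped
 * with percpu_ref_put().
 */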

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
		ref->data->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
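
/*
 * Sketch (hypothetical): since the caller must synchronize against state
 * changes itself and percpu_ref_kill() may only be called once, a dying
 * check under the caller's own lock can make teardown idempotent
 * ("foo->lock" is an assumed lock in the embedding structure):
 *
 *	spin_lock(&foo->lock);
 *	if (!percpu_ref_is_dying(&foo->ref))
 *		percpu_ref_kill(&foo->ref);
 *	spin_unlock(&foo->lock);
 */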

#endif