linux/include/linux/refcount.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                 INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                          (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *      int old = atomic_fetch_add_relaxed(1, &r->refs);
 *      // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *      if (old < 0)
 *              atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 *
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *     (UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *     0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 */
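
/*
 * A minimal usage sketch tying the ordering rules above to the typical
 * get/put pattern (illustration only; 'struct foo', foo_get() and foo_put()
 * are hypothetical and not part of this API):
 *
 *      struct foo {
 *              refcount_t ref;
 *              ...
 *      };
 *
 *      void foo_get(struct foo *f)
 *      {
 *              // Whatever handed us 'f' (a lock acquire, or a dependent
 *              // load under RCU) provides the ordering for this fully
 *              // relaxed increment.
 *              refcount_inc(&f->ref);
 *      }
 *
 *      void foo_put(struct foo *f)
 *      {
 *              // The release ordering of the decrement ensures all our
 *              // prior accesses to 'f' happen before the final free.
 *              if (refcount_dec_and_test(&f->ref))
 *                      kfree(f);
 *      }
 */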
  91
  92#ifndef _LINUX_REFCOUNT_H
  93#define _LINUX_REFCOUNT_H
  94
  95#include <linux/atomic.h>
  96#include <linux/bug.h>
  97#include <linux/compiler.h>
  98#include <linux/limits.h>
  99#include <linux/spinlock_types.h>
 100
 101struct mutex;
 102
 103/**
 104 * struct refcount_t - variant of atomic_t specialized for reference counts
 105 * @refs: atomic_t counter field
 106 *
 107 * The counter saturates at REFCOUNT_SATURATED and will not move once
 108 * there. This avoids wrapping the counter and causing 'spurious'
 109 * use-after-free bugs.
 110 */
 111typedef struct refcount_struct {
 112        atomic_t refs;
 113} refcount_t;
 114
 115#define REFCOUNT_INIT(n)        { .refs = ATOMIC_INIT(n), }
 116#define REFCOUNT_MAX            INT_MAX
 117#define REFCOUNT_SATURATED      (INT_MIN / 2)
 118
 119enum refcount_saturation_type {
 120        REFCOUNT_ADD_NOT_ZERO_OVF,
 121        REFCOUNT_ADD_OVF,
 122        REFCOUNT_ADD_UAF,
 123        REFCOUNT_SUB_UAF,
 124        REFCOUNT_DEC_LEAK,
 125};
 126
 127void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
 128
 129/**
 130 * refcount_set - set a refcount's value
 131 * @r: the refcount
 132 * @n: value to which the refcount will be set
 133 */
 134static inline void refcount_set(refcount_t *r, int n)
 135{
 136        atomic_set(&r->refs, n);
 137}
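
/*
 * A minimal initialization sketch, assuming a hypothetical 'struct foo'
 * with an embedded refcount_t member 'ref': a freshly created object
 * conventionally starts at 1 (the creator's reference) via refcount_set(),
 * or via REFCOUNT_INIT() for static objects.
 *
 *      struct foo *foo_alloc(void)
 *      {
 *              struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *              if (!f)
 *                      return NULL;
 *              refcount_set(&f->ref, 1);       // creator holds the first reference
 *              return f;
 *      }
 */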

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
        return atomic_read(&r->refs);
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
        int old = refcount_read(r);

        do {
                if (!old)
                        break;
        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

        if (unlikely(old < 0 || old + i < 0))
                refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

        return old;
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
        int old = atomic_fetch_add_relaxed(i, &r->refs);

        if (unlikely(!old))
                refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
        else if (unlikely(old < 0 || old + i < 0))
                refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
        return refcount_add_not_zero(1, r);
}
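
/*
 * A sketch of the lookup pattern refcount_inc_not_zero() is meant for,
 * under RCU. The 'struct foo' here is hypothetical, assumed to carry
 * 'key', a list_head 'node' on a hypothetical 'foo_list', and a
 * refcount_t 'ref'. The object may concurrently be dropping its last
 * reference, in which case the increment must fail rather than
 * resurrect it.
 *
 *      struct foo *foo_lookup(int key)
 *      {
 *              struct foo *f;
 *
 *              rcu_read_lock();
 *              list_for_each_entry_rcu(f, &foo_list, node) {
 *                      if (f->key == key && refcount_inc_not_zero(&f->ref)) {
 *                              rcu_read_unlock();
 *                              return f;       // reference acquired
 *                      }
 *              }
 *              rcu_read_unlock();
 *              return NULL;            // not found, or already going away
 *      }
 */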

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
        refcount_add(1, r);
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_sub_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time.  In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
        int old = atomic_fetch_sub_release(i, &r->refs);

        if (old == i) {
                smp_acquire__after_ctrl_dep();
                return true;
        }

        if (unlikely(old < 0 || old - i < 0))
                refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

        return false;
}
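
/*
 * A sketch of the batched-release case refcount_sub_and_test() exists for
 * (the 'foo' names are hypothetical): a caller that took 'n' references
 * in a single refcount_add() can drop them all in one operation.
 *
 *      void foo_put_many(struct foo *f, int n)
 *      {
 *              if (refcount_sub_and_test(n, &f->ref))
 *                      kfree(f);       // dropped the last reference
 *      }
 */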

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
        return refcount_sub_and_test(1, r);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
        if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
                refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
                                                       spinlock_t *lock,
                                                       unsigned long *flags);
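
/*
 * The *_dec_and_lock() helpers take the lock only when the count drops to
 * zero, the classic pattern for objects that must be unlinked from a locked
 * data structure before being freed. A sketch, assuming a hypothetical
 * 'struct foo' with a list_head 'node' protected by a hypothetical
 * spinlock_t 'foo_lock':
 *
 *      void foo_put(struct foo *f)
 *      {
 *              // On a 1->0 transition this returns true with foo_lock held.
 *              if (refcount_dec_and_lock(&f->ref, &foo_lock)) {
 *                      list_del(&f->node);
 *                      spin_unlock(&foo_lock);
 *                      kfree(f);
 *              }
 *      }
 */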

#endif /* _LINUX_REFCOUNT_H */