linux/include/linux/lockref.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from plain atomic refcounts in that they are
 * atomic with respect to the spinlock that goes with them.  In
 * particular, an implementation may avoid taking the spinlock for the
 * common increment/decrement operations, but it must still ensure that
 * the operation behaves as if the spinlock had been taken (for example
 * by using a cmpxchg that covers both the lock and the count word, or
 * by using memory transactions).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

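/*
 * The lockless fast path requires the spinlock and the count to share
 * a single naturally aligned 64-bit word, hence the SPINLOCK_SIZE <= 4
 * check below (SPINLOCK_SIZE is emitted into <generated/bounds.h> at
 * build time).
 */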
#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
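
/*
 * Illustrative sketch, not part of this header: with USE_CMPXCHG_LOCKREF,
 * the lib/lockref.c fast path can bump the count without taking the
 * spinlock by doing a 64-bit cmpxchg over the combined lock+count word,
 * retrying only while the lock is observed to be unlocked.  Roughly
 * (the exact helpers and retry policy differ across kernel versions):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64(&lockref->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return;
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	spin_lock(&lockref->lock);
 *	lockref->count++;
 *	spin_unlock(&lockref->lock);
 */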

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_put_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

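/*
 * Typical usage sketch (an assumed pattern, loosely modelled on the
 * dcache's d_lockref handling; obj, obj->ref and free_object() are
 * hypothetical).  lockref_put_or_lock() returns 0 with the spinlock
 * held instead of dropping the last reference, so the final put can
 * tear the object down under the lock:
 *
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return -ESTALE;
 *	...
 *	if (!lockref_put_or_lock(&obj->ref)) {
 *		lockref_mark_dead(&obj->ref);
 *		spin_unlock(&obj->ref.lock);
 *		free_object(obj);
 *	}
 */
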
/* Must be called under spinlock for reliable results */
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
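
/*
 * A dead lockref has a negative count.  Sketch of the lookup-side
 * check (assumed pattern; retry_lookup is a hypothetical label): a
 * racing lookup that holds the lock can spot a dying object instead
 * of resurrecting its refcount:
 *
 *	spin_lock(&obj->ref.lock);
 *	if (__lockref_is_dead(&obj->ref)) {
 *		spin_unlock(&obj->ref.lock);
 *		goto retry_lookup;
 *	}
 *	obj->ref.count++;
 *	spin_unlock(&obj->ref.lock);
 */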

#endif /* __LINUX_LOCKREF_H */