linux/include/linux/rwsem.h
/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)                 \
        .dep_map = {                                    \
                .name = #lockname,                      \
                .wait_type_inner = LD_WAIT_SLEEP,       \
        },
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, which would
 * cause cacheline bouncing problems.
 */
struct rw_semaphore {
        atomic_long_t count;
        /*
         * Write owner or one of the read owners, as well as flags regarding
         * the current state of the rwsem. Can be used as a speculative
         * check to see if the write owner is running on the cpu.
         */
        atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
        raw_spinlock_t wait_lock;
        struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
        void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};
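
/*
 * Illustrative sketch (not part of this header): when a rw_semaphore is
 * embedded in a larger structure, other frequently written fields can be
 * kept off the rwsem's cacheline, e.g. by cacheline-aligning them.  The
 * structure and field names below are hypothetical.
 *
 *      struct my_object {
 *              struct rw_semaphore     rwsem;
 *              struct list_head        cold_list;
 *              atomic_long_t           hot_counter ____cacheline_aligned;
 *      };
 */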

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return atomic_long_read(&sem->count) != 0;
}

#define RWSEM_UNLOCKED_VALUE            0L
#define __RWSEM_COUNT_INIT(name)        .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)                               \
        { __RWSEM_COUNT_INIT(name),                             \
          .owner = ATOMIC_LONG_INIT(0),                         \
          __RWSEM_OPT_INIT(name)                                \
          .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
          .wait_list = LIST_HEAD_INIT((name).wait_list),        \
          __RWSEM_DEBUG_INIT(name)                              \
          __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __init_rwsem((sem), #sem, &__key);                      \
} while (0)

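/*
 * Illustrative sketch (not part of this header): a rwsem can be defined
 * and initialized statically with DECLARE_RWSEM(), or initialized at
 * runtime with init_rwsem() before first use.  The names below are
 * hypothetical.
 *
 *      static DECLARE_RWSEM(my_static_rwsem);
 *
 *      struct my_object {
 *              struct rw_semaphore     rwsem;
 *      };
 *
 *      static void my_object_setup(struct my_object *obj)
 *      {
 *              init_rwsem(&obj->rwsem);
 *      }
 */
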
/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type is waiting for access to
 * the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
        return !list_empty(&sem->wait_list);
}
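
/*
 * Illustrative sketch (not part of this header): a long-running read-side
 * loop can poll rwsem_is_contended() and temporarily drop the lock so that
 * a waiting writer can get in.  The names below are hypothetical.
 *
 *      down_read(&obj->rwsem);
 *      while (more_work(obj)) {
 *              do_some_work(obj);
 *              if (rwsem_is_contended(&obj->rwsem)) {
 *                      up_read(&obj->rwsem);
 *                      cond_resched();
 *                      down_read(&obj->rwsem);
 *              }
 *      }
 *      up_read(&obj->rwsem);
 */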

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
        struct rwbase_rt        rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)                               \
        {                                                       \
                .rwbase = __RWBASE_INITIALIZER(name),           \
                __RWSEM_DEP_MAP_INIT(name)                      \
        }

#define DECLARE_RWSEM(lockname) \
        struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem)                                         \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __init_rwsem((sem), #sem, &__key);                      \
} while (0)

static __always_inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return rw_base_is_locked(&sem->rwbase);
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
        return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);
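
/*
 * Illustrative sketch (not part of this header): typical read-side usage.
 * The interruptible/killable variants must have their return value
 * checked.  The names below are hypothetical.
 *
 *      down_read(&obj->rwsem);
 *      ...read-side critical section...
 *      up_read(&obj->rwsem);
 *
 *      ret = down_read_killable(&obj->rwsem);
 *      if (ret)
 *              return ret;
 *      ...read-side critical section...
 *      up_read(&obj->rwsem);
 */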

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);
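
/*
 * Illustrative sketch (not part of this header): typical write-side usage,
 * including an opportunistic trylock.  The names below are hypothetical.
 *
 *      down_write(&obj->rwsem);
 *      ...modify the protected data...
 *      up_write(&obj->rwsem);
 *
 *      if (down_write_trylock(&obj->rwsem)) {
 *              ...modify the protected data...
 *              up_write(&obj->rwsem);
 *      } else {
 *              ...fall back, e.g. defer the work...
 *      }
 */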

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
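
/*
 * Illustrative sketch (not part of this header): downgrade_write() turns a
 * held write lock into a read lock without dropping it, so an update can
 * be published and then read back without letting another writer in
 * between.  The names below are hypothetical.
 *
 *      down_write(&obj->rwsem);
 *      ...modify the protected data...
 *      downgrade_write(&obj->rwsem);
 *      ...keep reading the data; other readers may now enter...
 *      up_read(&obj->rwsem);
 */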

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)                   \
do {                                                            \
        typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
        _down_write_nest_lock(sem, &(nest_lock)->dep_map);      \
} while (0)
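
/*
 * Illustrative sketch (not part of this header): when two rwsems of the
 * same lock class are always taken in the same order (e.g. a parent
 * object before its child), the second acquisition is annotated with a
 * subclass so lockdep does not report a false self-deadlock.  The names
 * below are hypothetical.
 *
 *      down_write(&parent->rwsem);
 *      down_write_nested(&child->rwsem, SINGLE_DEPTH_NESTING);
 *      ...update both objects...
 *      up_write(&child->rwsem);
 *      up_write(&parent->rwsem);
 */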

/*
 * Take/release a lock when the task that acquired it will not be the one
 * to release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
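
/*
 * Illustrative sketch (not part of this header): the non-owner variants
 * allow the read lock to be released from a different context than the
 * one that acquired it, e.g. from a deferred work handler.  The names
 * below are hypothetical.
 *
 *      down_read_non_owner(&obj->rwsem);
 *      queue_work(obj->wq, &obj->work);
 *
 *      static void my_work_fn(struct work_struct *work)
 *      {
 *              struct my_object *obj = container_of(work, struct my_object, work);
 *
 *              ...finish the read-side work...
 *              up_read_non_owner(&obj->rwsem);
 *      }
 */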
#else
# define down_read_nested(sem, subclass)                down_read(sem)
# define down_read_killable_nested(sem, subclass)       down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)   down_write(sem)
# define down_write_nested(sem, subclass)       down_write(sem)
# define down_write_killable_nested(sem, subclass)      down_write_killable(sem)
# define down_read_non_owner(sem)               down_read(sem)
# define up_read_non_owner(sem)                 up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */