linux/include/asm-generic/rwsem.h
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */
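
/*
 * Callers use the wrappers in <linux/rwsem.h> rather than these
 * helpers directly.  An illustrative sketch (not part of this header):
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);     shared: many readers may hold it
 *      ... read the protected data ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);    exclusive: one writer, no readers
 *      ... modify the protected data ...
 *      up_write(&my_sem);
 */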

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK              0xffffffffL
#else
# define RWSEM_ACTIVE_MASK              0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE            0x00000000L
#define RWSEM_ACTIVE_BIAS               0x00000001L
#define RWSEM_WAITING_BIAS              (-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
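
/*
 * How the count is read (64-bit values shown, where RWSEM_ACTIVE_MASK
 * is 0xffffffff; a sketch of the encoding, not part of the original
 * header):
 *
 *      0x0000000000000000      unlocked (RWSEM_UNLOCKED_VALUE)
 *      0x000000000000000N      N readers active, no waiters
 *      0xffffffff00000000      no lock held, waiters queued (RWSEM_WAITING_BIAS)
 *      0xffffffff00000001      one writer active (RWSEM_ACTIVE_WRITE_BIAS),
 *                              or one reader active with waiters queued
 *      0xffffffff0000000N      N readers active, waiters queued
 *
 * The low RWSEM_ACTIVE_MASK bits count active lockers; queued waiters
 * push the count negative.  Hence count > 0 means uncontended readers,
 * count == 0 means free, and count < 0 means a writer and/or waiters.
 */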

/*
 * lock for reading
 */
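/*
 * Fast path: atomically increment the count with acquire semantics.  A
 * positive result means the shared lock was taken outright; a result
 * <= 0 means a writer holds the lock or waiters are queued, so fall
 * back to the rwsem_down_read_failed() slow path.
 */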
static inline void __down_read(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_inc_return_acquire((atomic_long_t *)&sem->count) <= 0))
                rwsem_down_read_failed(sem);
}

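/*
 * Trylock for reading: loop a cmpxchg while the count stays
 * non-negative (no writer, no waiters queued).  Returns 1 on success,
 * 0 if the lock is write-held or contended.
 */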
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        long tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg_acquire(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
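/*
 * Fast path: atomically add the write bias with acquire semantics.  If
 * the result is exactly RWSEM_ACTIVE_WRITE_BIAS the count was zero
 * beforehand and the lock is now held exclusively; otherwise take the
 * rwsem_down_write_failed() slow path.  The subclass argument exists
 * for lockdep nesting annotations and is unused here.
 */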
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        long tmp;

        tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

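/*
 * Trylock for writing: a single cmpxchg from RWSEM_UNLOCKED_VALUE to
 * RWSEM_ACTIVE_WRITE_BIAS, which can only succeed while the semaphore
 * is completely idle.
 */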
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        long tmp;

        tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
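/*
 * Fast path: decrement the count with release semantics.  A wakeup is
 * only needed when the result is negative (waiters queued) and the low
 * RWSEM_ACTIVE_MASK bits are clear, i.e. this was the last active
 * locker.
 */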
static inline void __up_read(struct rw_semaphore *sem)
{
        long tmp;

        tmp = atomic_long_dec_return_release((atomic_long_t *)&sem->count);
        if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
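/*
 * Fast path: subtract the write bias with release semantics.  A
 * negative result means waiters queued up while the lock was held, so
 * hand it off via rwsem_wake().
 */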
static inline void __up_write(struct rw_semaphore *sem)
{
        if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
                                 (atomic_long_t *)&sem->count) < 0))
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
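/*
 * Used by the slow-path code to adjust the count when the new value is
 * not needed; no ordering beyond the atomicity of the add is implied.
 */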
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        atomic_long_add(delta, (atomic_long_t *)&sem->count);
}

/*
 * downgrade write lock to read lock
 */
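/*
 * Adding -RWSEM_WAITING_BIAS converts the write bias into a single
 * read bias, since RWSEM_ACTIVE_WRITE_BIAS - RWSEM_WAITING_BIAS ==
 * RWSEM_ACTIVE_BIAS: the caller stays counted as one active reader.  A
 * negative result means waiters are still queued, so wake any readers
 * among them.
 */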
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        long tmp;

        /*
         * When downgrading from exclusive to shared ownership,
         * anything inside the write-locked region cannot leak
         * into the read side. In contrast, anything in the
         * read-locked region is ok to be re-ordered into the
         * write side. As such, rely on RELEASE semantics.
         */
        tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS,
                                     (atomic_long_t *)&sem->count);
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
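/*
 * Used by the slow-path code when the updated count is needed; the
 * plain add_return is fully ordered, unlike the acquire/release
 * variants in the fast paths above.
 */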
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

#endif  /* __KERNEL__ */
#endif  /* _ASM_GENERIC_RWSEM_H */