/*
 * arch/sh/include/asm/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */

#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
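
/*
 * How the count field reads (illustrative values, implied by the bias
 * constants above rather than spelled out in the source): the low 16
 * bits count active lockers, and each queued waiter contributes
 * RWSEM_WAITING_BIAS, driving the count negative.
 *
 *	0x00000000	unlocked, no waiters
 *	0x00000001	one active reader
 *	0x00000003	three active readers
 *	0xffff0001	one active writer (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *	0xffff0000	no active lockers, but at least one waiter queued
 */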

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
	  LIST_HEAD_INIT((name).wait_list) \
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name)		\
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
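
/*
 * Example (illustrative, not part of the original header): a
 * compile-time initialized rwsem used through the linux/rwsem.h
 * entry points.
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);
 *	... read-side critical section ...
 *	up_read(&example_sem);
 */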

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * The static __key gives each init_rwsem() call site its own lock
 * class for lockdep.
 */
#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)
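
/*
 * Example (illustrative; 'struct foo' is hypothetical): run-time
 * initialization of an embedded rwsem.
 *
 *	struct foo {
 *		struct rw_semaphore sem;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		init_rwsem(&f->sem);
 *	}
 */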

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
		smp_wmb();	/* no writer active or waiting: got it */
	else
		rwsem_down_read_failed(sem);	/* slow path in lib/rwsem.c */
}
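
/*
 * Worked transitions for __down_read() (illustrative, derived from the
 * bias constants):
 *
 *	0x00000000 -> 0x00000001	first reader, result > 0: got it
 *	0x00000001 -> 0x00000002	second reader: got it
 *	0xffff0001 -> 0xffff0002	writer active, result < 0: slow path
 */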

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}
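
/*
 * Worked transitions for __down_write() (illustrative): the add only
 * yields exactly RWSEM_ACTIVE_WRITE_BIAS when the old count was zero,
 * i.e. the semaphore was completely unlocked.
 *
 *	0x00000000 -> 0xffff0001	was unlocked: write lock taken
 *	0x00000001 -> 0xffff0002	a reader was active: slow path
 */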

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_dec_return((atomic_t *)(&sem->count));
	/* if waiters are queued and no active lockers remain, wake them */
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
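
/*
 * Worked transitions for __up_read() (illustrative):
 *
 *	0x00000001 -> 0x00000000	last reader, no waiters: nothing to do
 *	0xffff0002 -> 0xffff0001	a reader remains (active mask != 0)
 *	0xffff0001 -> 0xffff0000	last reader with a waiter queued:
 *					result < -1, active mask clear,
 *					so rwsem_wake() is called
 */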

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}
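
/*
 * Worked transitions for __up_write() (illustrative):
 *
 *	0xffff0001 -> 0x00000000	uncontended release: nothing to do
 *	0xfffe0001 -> 0xffff0000	a waiter is queued, result < 0:
 *					rwsem_wake() hands the lock on
 */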

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
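
/*
 * Worked transitions for __downgrade_write() (illustrative): adding
 * -RWSEM_WAITING_BIAS turns the write bias into a single read bias.
 *
 *	0xffff0001 -> 0x00000001	no waiters: now one active reader
 *	0xfffe0001 -> 0xffff0001	waiters queued, result < 0:
 *					rwsem_downgrade_wake() admits
 *					any queued readers
 */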

static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	/* the lockdep subclass annotation is done by the caller */
	__down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */