linux/arch/sh/include/asm/rwsem.h
/*
 * arch/sh/include/asm/rwsem.h: R/W semaphores for SH using the stuff
 * in lib/rwsem.c.
 */
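
/*
 * None of the __*() primitives below are called directly; they back
 * the generic down_read()/up_read()/down_write()/up_write() wrappers
 * declared in linux/rwsem.h.  An illustrative (hypothetical) caller:
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	read-side critical section
 *	up_read(&example_sem);
 *
 *	down_write(&example_sem);	write-side critical section
 *	up_write(&example_sem);
 */
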
#ifndef _ASM_SH_RWSEM_H
#define _ASM_SH_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

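/*
 * How the bias values combine (all derived from the constants above):
 *
 *	0x00000000	unlocked
 *	0x0000000N	N active readers, nobody queued
 *	0xffff0001	one active writer (WAITING_BIAS + ACTIVE_BIAS)
 *	0xffff0000	no active lockers, but sleepers are queued
 *
 * A negative count therefore always signals contention: an uncontended
 * __down_read() takes the count from 0 to 1 (fast path), while a read
 * attempt against a held write lock takes it from -0xffff to -0xfffe
 * and drops into the slow path in lib/rwsem.c.
 */
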
/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}

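/*
 * The trylock below loops only while the sampled count stays
 * non-negative, i.e. while no writer or queued sleeper is present; a
 * failed cmpxchg in that window just means another CPU moved the count
 * first, so it is re-sampled and the attempt repeated.
 */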
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}

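/*
 * In __down_write() below, the add can return exactly
 * RWSEM_ACTIVE_WRITE_BIAS only if the count was RWSEM_UNLOCKED_VALUE
 * beforehand; any other result means readers, a writer or sleepers got
 * there first, and the slow path is taken.
 */
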
/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}

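/*
 * A single shot at the write lock: the cmpxchg succeeds only if the
 * count is exactly RWSEM_UNLOCKED_VALUE, since any reader, writer or
 * sleeper leaves the count non-zero.  On failure 0 is returned rather
 * than blocking.
 */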
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}

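/*
 * Wake-up condition in __up_read() below: after the decrement, a count
 * below -1 with a clear active mask means the last active locker just
 * left while RWSEM_WAITING_BIAS is still folded into the count, i.e.
 * sleepers remain queued and must be woken.
 */
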
/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_dec_return((atomic_t *)(&sem->count));
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}

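/*
 * The two helpers below, rwsem_atomic_add() and rwsem_atomic_update(),
 * are used by the generic slow-path code in lib/rwsem.c to adjust the
 * count on behalf of waiters.
 */
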
/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

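/*
 * Downgrade arithmetic for __downgrade_write() below: subtracting
 * RWSEM_WAITING_BIAS from RWSEM_ACTIVE_WRITE_BIAS leaves exactly
 * RWSEM_ACTIVE_READ_BIAS (-0x10000 + 1 - (-0x10000) == 1), turning the
 * writer's bias into a single reader's; a still-negative result means
 * sleepers are queued, and waiting readers can now be woken.
 */
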
/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

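/*
 * The nested variant exists for the benefit of lockdep annotations;
 * this implementation has no per-subclass state, so it ignores the
 * subclass and maps onto the plain write lock.
 */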
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        __down_write(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* __KERNEL__ */
#endif /* _ASM_SH_RWSEM_H */