linux/include/asm-xtensa/rwsem.h
/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/system.h>

/*
 * the semaphore definition
 */
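/*
 * The count encodes the whole lock state: the low 16 bits
 * (RWSEM_ACTIVE_MASK) hold the number of active lockers, while each
 * waiter contributes RWSEM_WAITING_BIAS, driving the count negative.
 * A reader acquires by adding RWSEM_ACTIVE_READ_BIAS; a writer adds
 * RWSEM_ACTIVE_WRITE_BIAS, i.e. one active locker plus the waiting
 * bias, so concurrent readers see a non-positive count and take the
 * slow path.  A count of zero means unlocked and uncontended.
 */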
struct rw_semaphore {
        signed long             count;
#define RWSEM_UNLOCKED_VALUE            0x00000000
#define RWSEM_ACTIVE_BIAS               0x00000001
#define RWSEM_ACTIVE_MASK               0x0000ffff
#define RWSEM_WAITING_BIAS              (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name)             \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

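/*
 * Out-of-line slow paths for the contended cases, provided by the
 * generic rwsem library code (lib/rwsem.c); the inline fast paths
 * below fall back to them when the atomic update shows contention.
 */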
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        /* A positive result means no writer is active or waiting. */
        if (atomic_add_return(1, (atomic_t *)(&sem->count)) > 0)
                smp_wmb();
        else
                rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        int tmp;

        /* Retry the cmpxchg as long as no writer holds or waits for the lock. */
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        smp_wmb();
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
        int tmp;

        tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
                                (atomic_t *)(&sem->count));
        /* Only a previously unlocked count yields exactly the write bias. */
        if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
                smp_wmb();
        else
                rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        int tmp;

        /* Succeeds only if the semaphore was completely unlocked. */
        tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        smp_wmb();
        return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        tmp = atomic_sub_return(1, (atomic_t *)(&sem->count));
        /* Wake waiters if we were the last active reader and someone waits. */
        if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        smp_wmb();
        /* A negative result means waiters remain on the queue. */
        if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
                              (atomic_t *)(&sem->count)) < 0)
                rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
        atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        int tmp;

        smp_wmb();
        /* Convert the write bias into a single read bias; wake waiting readers. */
        tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
        if (tmp < 0)
                rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
        smp_mb();
        return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif  /* _XTENSA_RWSEM_H */
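
For context, kernel code does not call these __down_*/__up_* helpers directly; they sit underneath the generic wrappers declared in <linux/rwsem.h> (down_read(), up_read(), down_write(), up_write(), init_rwsem()). The following is a minimal sketch of that wrapper-level usage; the protected list and all example_* identifiers are hypothetical and only illustrate the read-mostly pattern rw-semaphores are intended for.

#include <linux/rwsem.h>
#include <linux/list.h>

/* Hypothetical data protected by a reader/writer semaphore. */
static LIST_HEAD(example_list);
static DECLARE_RWSEM(example_sem);

struct example_node {
        struct list_head link;
        int key;
};

/* Writers take the semaphore exclusively while modifying the list. */
static void example_add(struct example_node *node)
{
        down_write(&example_sem);
        list_add(&node->link, &example_list);
        up_write(&example_sem);
}

/* Readers may hold the lock concurrently while only traversing. */
static int example_contains(int key)
{
        struct example_node *node;
        int found = 0;

        down_read(&example_sem);
        list_for_each_entry(node, &example_list, link) {
                if (node->key == key) {
                        found = 1;
                        break;
                }
        }
        up_read(&example_sem);
        return found;
}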