linux/arch/ia64/include/asm/rwsem.h
/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds RWSEM_ACTIVE_WRITE_BIAS, the count becomes
 * 0xffffffff00000001 for the case of an uncontended lock. Readers
 * increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and possibly waiting readers), in which case
 * the reader goes to sleep.
 */
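
/*
 * For illustration, the fast paths below yield the following example
 * counts (blocked waiters adjust the count further in the generic
 * slow-path code, which lives outside this file):
 *
 *      0x0000000000000000      unlocked
 *      0x0000000000000001      one active reader
 *      0x0000000000000003      three active readers
 *      0xffffffff00000001      one active writer, uncontended
 */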

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <linux/list.h>
#include <linux/spinlock.h>

#include <asm/intrinsics.h>

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long             count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};

#define RWSEM_UNLOCKED_VALUE            __IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS               __IA64_UL_CONST(0x0000000000000001)
#define RWSEM_ACTIVE_MASK               __IA64_UL_CONST(0x00000000ffffffff)
#define RWSEM_WAITING_BIAS              -__IA64_UL_CONST(0x0000000100000000)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
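
/*
 * Worked arithmetic: RWSEM_ACTIVE_WRITE_BIAS evaluates to
 * -0x0000000100000000 + 1, i.e. 0xffffffff00000001 as an unsigned
 * 64-bit value: one writer/waiter negated in the MSW plus one active
 * lock in the LSW, which is exactly the uncontended-writer count
 * described in the comment at the top of this file.
 */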

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
          LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

static inline void
init_rwsem (struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

        if (result < 0)
                rwsem_down_read_failed(sem);
}
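
/*
 * Example: an uncontended __down_read() moves the count from 0 to 1;
 * the fetchadd returns the old value 0, which is non-negative, so the
 * caller holds a read lock without taking the slow path.  A negative
 * old value means a writer is active or waiting, and the reader must
 * queue via rwsem_down_read_failed().
 */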

/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_acq(&sem->count, old, new) != old);

        if (old != 0)
                rwsem_down_write_failed(sem);
}
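
/*
 * Example: an uncontended __down_write() moves the count from 0 to
 * 0xffffffff00000001 in one cmpxchg.  The loop only retries when
 * another CPU changed the count between the read and the cmpxchg;
 * any starting value other than 0 means active or waiting lockers,
 * so the writer falls back to rwsem_down_write_failed().
 */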

/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

        if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}
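
/*
 * Note on __up_read() above: the fetchadd returns the pre-decrement
 * count, so "--result" reconstructs the post-decrement value.  A
 * wake-up is needed only when waiters exist (old count negative) and
 * this reader was the last active locker (new count's low half zero).
 */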

/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}
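
/*
 * Example: an uncontended __up_write() moves the count from
 * 0xffffffff00000001 back to 0.  If lockers queued while the write
 * lock was held, the new count may still be negative with a zero
 * active half, which is precisely the rwsem_wake() condition above.
 */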

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
        long tmp;
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
                        return 1;
                }
        }
        return 0;
}
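
/*
 * __down_read_trylock() retries only while the count stays
 * non-negative, e.g. 2 -> 3 when racing with other readers.  The
 * moment a writer bias makes the count negative it gives up and
 * returns 0 rather than spinning.
 */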

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
        long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
                              RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}
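
/*
 * __down_write_trylock() attempts the single transition
 * 0 -> 0xffffffff00000001.  Any other starting count implies active
 * or waiting lockers, so the cmpxchg leaves the count untouched and
 * 0 is returned.
 */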

/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_WAITING_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (old < 0)
                rwsem_downgrade_wake(sem);
}
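
/*
 * Downgrade arithmetic: subtracting RWSEM_WAITING_BIAS adds
 * 0x0000000100000000, so an uncontended write lock goes from
 * 0xffffffff00000001 to 0x0000000000000001, i.e. one active reader.
 * A negative old count means sleepers may be queued, so
 * rwsem_downgrade_wake() is called to let waiting readers run
 * alongside the downgraded holder.
 */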

/*
 * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)    atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
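
/*
 * These helpers are presumably what the generic slow-path code uses to
 * adjust the count: rwsem_atomic_add() discards the result, while
 * rwsem_atomic_update() returns the post-add count so the caller can
 * decide whether anybody needs waking.
 */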

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* _ASM_IA64_RWSEM_H */