linux/arch/ia64/include/asm/rwsem.h
/*
 * R/W semaphores for ia64
 *
 * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
 * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 2005 Christoph Lameter <clameter@sgi.com>
 *
 * Based on asm-i386/rwsem.h and other architecture implementations.
 *
 * The MSW of the count is the negated number of active writers and
 * waiting lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer adds RWSEM_ACTIVE_WRITE_BIAS (a negative value), it'll
 * get 0xffffffff00000001 for the case of an uncontended lock. Readers
 * increment by 1 and see a positive value when uncontended, negative if
 * there are writers (and maybe readers) waiting, in which case the
 * reader goes to sleep.
 */

#ifndef _ASM_IA64_RWSEM_H
#define _ASM_IA64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#include <asm/intrinsics.h>

#define RWSEM_UNLOCKED_VALUE            __IA64_UL_CONST(0x0000000000000000)
#define RWSEM_ACTIVE_BIAS               (1L)
#define RWSEM_ACTIVE_MASK               (0xffffffffL)
#define RWSEM_WAITING_BIAS              (-0x100000000L)
#define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

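/*
 * Worked example of the bias arithmetic (an illustrative sketch, not part
 * of the original header; demo_bias_math is a hypothetical name).  Plain
 * host C on an LP64 machine, compiled out of the actual build.
 */
#if 0
#include <stdio.h>

int demo_bias_math(void)
{
        long count = 0;                         /* RWSEM_UNLOCKED_VALUE */

        count += 1;                             /* reader takes the lock */
        printf("reader: %016lx\n", count);      /* 0000000000000001 */
        count -= 1;                             /* reader releases */

        count += -0x100000000L + 1L;            /* RWSEM_ACTIVE_WRITE_BIAS */
        printf("writer: %016lx\n", count);      /* ffffffff00000001 */
        return 0;
}
#endif
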
/*
 * lock for reading
 */
static inline void
__down_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);

        if (result < 0)
                rwsem_down_read_failed(sem);
}

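/*
 * A portable sketch of the fetch-and-add-with-acquire step above.  The
 * assumption for illustration: __atomic_fetch_add(..., __ATOMIC_ACQUIRE)
 * mirrors ia64_fetchadd8_acq in returning the value the counter held
 * *before* the increment, which is what the result < 0 test relies on.
 * demo_fetchadd_acq is a hypothetical name; the block is compiled out.
 */
#if 0
static long demo_fetchadd_acq(long *count)
{
        /* returns the old value; the counter now holds old + 1 */
        return __atomic_fetch_add(count, 1, __ATOMIC_ACQUIRE);
}
#endif
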
/*
 * lock for writing
 */
static inline void
__down_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_acq(&sem->count, old, new) != old);

        if (old != 0)
                rwsem_down_write_failed(sem);
}

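/*
 * The loop above is the classic compare-and-swap retry pattern: read the
 * count, compute the desired new value, and retry if another CPU changed
 * the count in between.  Below, a host-C sketch using the GCC/Clang
 * __atomic builtins as a stand-in for cmpxchg_acq (an illustrative
 * assumption; demo_cas_add is a hypothetical name, compiled out).
 */
#if 0
static void demo_cas_add(long *count, long delta)
{
        long old = *count;

        /* on failure, __atomic_compare_exchange_n stores the freshly
           observed value into old, so each retry recomputes old + delta */
        while (!__atomic_compare_exchange_n(count, &old, old + delta,
                                            0, __ATOMIC_ACQUIRE,
                                            __ATOMIC_RELAXED))
                ;
}
#endif
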
/*
 * unlock after reading
 */
static inline void
__up_read (struct rw_semaphore *sem)
{
        long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);

        if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

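/*
 * Worked example of the wake test above (illustrative, assuming the
 * generic rwsem slow path has parked RWSEM_WAITING_BIAS in the count for
 * a queued waiter): with one active reader and one waiter, the count is
 * RWSEM_WAITING_BIAS + 1 = 0xffffffff00000001.  ia64_fetchadd8_rel
 * returns that old value, so result < 0; --result yields the new count
 * 0xffffffff00000000, whose RWSEM_ACTIVE_MASK part is zero -- the last
 * active locker just left while someone waits, so wake a waiter.
 */
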
/*
 * unlock after writing
 */
static inline void
__up_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_ACTIVE_WRITE_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
}

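/*
 * Worked example (illustrative, same waiter assumption as above): with
 * the writer active and one queued waiter, the count is
 * RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS = 0xfffffffe00000001.
 * Subtracting the write bias leaves RWSEM_WAITING_BIAS =
 * 0xffffffff00000000: negative with a zero active part, so a waiter is
 * woken.
 */
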
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int
__down_read_trylock (struct rw_semaphore *sem)
{
        long tmp;
        while ((tmp = sem->count) >= 0) {
                if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
                        return 1;
                }
        }
        return 0;
}

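/*
 * Illustrative note: the loop retries only while the observed count is
 * non-negative.  A negative count means a writer is active or waiting,
 * so the trylock gives up immediately instead of spinning.
 */
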
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int
__down_write_trylock (struct rw_semaphore *sem)
{
        long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
                              RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
}

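/*
 * Illustrative note: unlike the read trylock, this is a single-shot
 * cmpxchg -- a writer may only take the lock when the count is exactly
 * RWSEM_UNLOCKED_VALUE (no active or waiting lockers), so there is
 * nothing to retry.
 */
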
/*
 * downgrade write lock to read lock
 */
static inline void
__downgrade_write (struct rw_semaphore *sem)
{
        long old, new;

        do {
                old = sem->count;
                new = old - RWSEM_WAITING_BIAS;
        } while (cmpxchg_rel(&sem->count, old, new) != old);

        if (old < 0)
                rwsem_downgrade_wake(sem);
}

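/*
 * Worked example (illustrative): subtracting RWSEM_WAITING_BIAS turns
 * the writer's bias into a reader's, since RWSEM_ACTIVE_WRITE_BIAS -
 * RWSEM_WAITING_BIAS == RWSEM_ACTIVE_READ_BIAS.  An uncontended writer's
 * count of 0xffffffff00000001 becomes 0x0000000000000001 (one active
 * reader).  A writer-held count is always negative, so old < 0 always
 * holds here and the slow path checks for (possibly absent) waiting
 * readers to wake.
 */
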
/*
 * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
 * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
 */
#define rwsem_atomic_add(delta, sem)    atomic64_add(delta, (atomic64_t *)(&(sem)->count))
#define rwsem_atomic_update(delta, sem) atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))

#endif /* _ASM_IA64_RWSEM_H */