linux/arch/s390/include/asm/rwsem.h
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  include/asm-s390/rwsem.h
 *
 *  S390 version
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer acquires an uncontended lock it adds RWSEM_ACTIVE_WRITE_BIAS
 * and the count becomes 0xffff0001. Contention can be detected because the
 * compare-and-swap loops below return the old value (the i386 original relied
 * on XADD for this). Readers increment by 1 and see a positive value when
 * uncontended, negative if there are writers (and possibly readers) waiting,
 * in which case the reader goes to sleep.
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the sign bit alone.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * (The values quoted above are for the 31-bit layout; the 64-bit build uses
 * the analogous 64-bit constants defined below.)
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */

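/*
 * Illustrative example only (not part of the interface): how the 31-bit
 * count evolves across a few uncontended operations, using the constants
 * defined below.
 *
 *	count					meaning
 *	0x00000000				unlocked
 *	+ RWSEM_ACTIVE_READ_BIAS  = 0x00000001	one active reader
 *	+ RWSEM_ACTIVE_READ_BIAS  = 0x00000002	two active readers
 *	- RWSEM_ACTIVE_READ_BIAS  = 0x00000001	one reader left
 *	- RWSEM_ACTIVE_READ_BIAS  = 0x00000000	unlocked again
 *	+ RWSEM_ACTIVE_WRITE_BIAS = 0xffff0001	one active writer, MSW = -1
 *
 * A reader arriving while the writer holds the lock would see a negative
 * old value and call rwsem_down_read_failed() to queue itself.
 */
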
#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
        signed long             count;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
#endif
};

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE    0x00000000
#define RWSEM_ACTIVE_BIAS       0x00000001
#define RWSEM_ACTIVE_MASK       0x0000ffff
#define RWSEM_WAITING_BIAS      (-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE    0x0000000000000000L
#define RWSEM_ACTIVE_BIAS       0x0000000000000001L
#define RWSEM_ACTIVE_MASK       0x00000000ffffffffL
#define RWSEM_WAITING_BIAS      (-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

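/*
 * For reference, the composed biases evaluate to:
 *	31-bit:	RWSEM_ACTIVE_WRITE_BIAS = -0x0000ffff (0xffff0001 as a 32-bit value)
 *	64-bit:	RWSEM_ACTIVE_WRITE_BIAS = -0x00000000ffffffffL
 * so taking the lock for write drives the count negative while still adding
 * one active-lock unit to the low half.
 */
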
/*
 * initialisation
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)

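/*
 * Typical use, sketched for illustration only; generic code goes through
 * linux/rwsem.h, which maps down_read()/up_read() etc. onto the fast-path
 * helpers defined below ("example_sem" is a made-up name):
 *
 *	static DECLARE_RWSEM(example_sem);
 *
 *	down_read(&example_sem);	- fast path: __down_read()
 *	...read-side critical section...
 *	up_read(&example_sem);		- fast path: __up_read()
 */
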
static inline void init_rwsem(struct rw_semaphore *sem)
{
        sem->count = RWSEM_UNLOCKED_VALUE;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

/*
 * Note: this macro shadows the static inline init_rwsem() above at every
 * later use, so that each init_rwsem() call site gets its own static
 * lock_class_key to pass to __init_rwsem().
 */
#define init_rwsem(sem)                         \
do {                                            \
        static struct lock_class_key __key;     \
                                                \
        __init_rwsem((sem), #sem, &__key);      \
} while (0)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
        if (old < 0)
                rwsem_down_read_failed(sem);
}
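
/*
 * Roughly equivalent C for the compare-and-swap loop above, for readers not
 * fluent in s390 assembly (illustration only; "compare_and_swap" stands in
 * for the cs/csg instruction, which also reloads "old" on failure):
 *
 *	old = sem->count;
 *	do {
 *		new = old + RWSEM_ACTIVE_READ_BIAS;
 *	} while (!compare_and_swap(&sem->count, &old, new));
 *	if (old < 0)
 *		rwsem_down_read_failed(sem);
 *
 * The other helpers below follow the same pattern with different deltas.
 */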

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     ltr     %1,%0\n"
                "       jm      1f\n"
                "       ahi     %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b\n"
                "1:"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     ltgr    %1,%0\n"
                "       jm      1f\n"
                "       aghi    %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b\n"
                "1:"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory");
        return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        signed long old, new, tmp;

        tmp = RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (old != 0)
                rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
        signed long old;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%2)\n"
                "0:     ltr     %0,%0\n"
                "       jnz     1f\n"
                "       cs      %0,%4,0(%2)\n"
                "       jl      0b\n"
#else /* __s390x__ */
                "       lg      %0,0(%2)\n"
                "0:     ltgr    %0,%0\n"
                "       jnz     1f\n"
                "       csg     %0,%4,0(%2)\n"
                "       jl      0b\n"
#endif /* __s390x__ */
                "1:"
                : "=&d" (old), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory");
        return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       ahi     %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       aghi    %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count),
                  "i" (-RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
        if (new < 0)
                if ((new & RWSEM_ACTIVE_MASK) == 0)
                        rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
        signed long old, new, tmp;

        tmp = -RWSEM_ACTIVE_WRITE_BIAS;
        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (new < 0)
                if ((new & RWSEM_ACTIVE_MASK) == 0)
                        rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 *
 * Adding -RWSEM_WAITING_BIAS turns the caller's RWSEM_ACTIVE_WRITE_BIAS
 * contribution into RWSEM_ACTIVE_READ_BIAS, so the former writer now counts
 * as a single active reader.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
        signed long old, new, tmp;

        tmp = -RWSEM_WAITING_BIAS;
        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       a       %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       ag      %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "m" (tmp)
                : "cc", "memory");
        if (new > 1)
                rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "d" (delta)
                : "cc", "memory");
}

/*
 * implement exchange and add functionality
 * (atomically adds delta to sem->count and returns the new value, as
 * expected by the generic slow paths)
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
        signed long old, new;

        asm volatile(
#ifndef __s390x__
                "       l       %0,0(%3)\n"
                "0:     lr      %1,%0\n"
                "       ar      %1,%5\n"
                "       cs      %0,%1,0(%3)\n"
                "       jl      0b"
#else /* __s390x__ */
                "       lg      %0,0(%3)\n"
                "0:     lgr     %1,%0\n"
                "       agr     %1,%5\n"
                "       csg     %0,%1,0(%3)\n"
                "       jl      0b"
#endif /* __s390x__ */
                : "=&d" (old), "=&d" (new), "=m" (sem->count)
                : "a" (&sem->count), "m" (sem->count), "d" (delta)
                : "cc", "memory");
        return new;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */