linux/drivers/tty/tty_ldsem.c
/*
 * Ldisc rw semaphore
 *
 * The ldisc semaphore is semantically a rw_semaphore but enforces an
 * alternate policy, namely:
 *   1) Supports lock wait timeouts
 *   2) Write waiter has priority
 *   3) Downgrading is not supported
 *
 * Implementation notes:
 *   1) Upper half of semaphore count is a wait count (differs from rwsem
 *      in that rwsem normalizes the upper half to the wait bias)
 *   2) Lacks overflow checking
 *
 * The generic counting was copied and modified from include/asm-generic/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 *
 * The scheduling policy was copied and modified from lib/rwsem.c
 * Written by David Howells (dhowells@redhat.com).
 *
 * This implementation incorporates the write lock stealing work of
 * Michel Lespinasse <walken@google.com>.
 *
 * Copyright (C) 2013 Peter Hurley <peter@hurleysoftware.com>
 *
 * This file may be redistributed under the terms of the GNU General Public
 * License v2.
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/tty.h>
#include <linux/sched.h>


#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __acq(l, s, t, r, c, n, i)             \
                                lock_acquire(&(l)->dep_map, s, t, r, c, n, i)
# define __rel(l, n, i)                         \
                                lock_release(&(l)->dep_map, n, i)
#define lockdep_acquire(l, s, t, i)             __acq(l, s, t, 0, 1, NULL, i)
#define lockdep_acquire_nest(l, s, t, n, i)     __acq(l, s, t, 0, 1, n, i)
#define lockdep_acquire_read(l, s, t, i)        __acq(l, s, t, 1, 1, NULL, i)
#define lockdep_release(l, n, i)                __rel(l, n, i)
#else
# define lockdep_acquire(l, s, t, i)            do { } while (0)
# define lockdep_acquire_nest(l, s, t, n, i)    do { } while (0)
# define lockdep_acquire_read(l, s, t, i)       do { } while (0)
# define lockdep_release(l, n, i)               do { } while (0)
#endif

#ifdef CONFIG_LOCK_STAT
# define lock_stat(_lock, stat)         lock_##stat(&(_lock)->dep_map, _RET_IP_)
#else
# define lock_stat(_lock, stat)         do { } while (0)
#endif


#if BITS_PER_LONG == 64
# define LDSEM_ACTIVE_MASK      0xffffffffL
#else
# define LDSEM_ACTIVE_MASK      0x0000ffffL
#endif

#define LDSEM_UNLOCKED          0L
#define LDSEM_ACTIVE_BIAS       1L
#define LDSEM_WAIT_BIAS         (-LDSEM_ACTIVE_MASK-1)
#define LDSEM_READ_BIAS         LDSEM_ACTIVE_BIAS
#define LDSEM_WRITE_BIAS        (LDSEM_WAIT_BIAS + LDSEM_ACTIVE_BIAS)
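
/*
 * Worked example of the bias arithmetic above (64-bit values shown):
 *
 *   unlocked:                  count == 0x0000000000000000
 *   1 reader active:           count == 0x0000000000000001  (+READ_BIAS)
 *   2 readers active:          count == 0x0000000000000002
 *   writer active:             count == 0xffffffff00000001  (+WRITE_BIAS)
 *   writer active, 1 waiter:   count == 0xfffffffe00000001
 *
 * The low half counts active holders; the write bias and each waiter each
 * subtract one from the high half, so a negative count means a writer holds
 * the lock and/or tasks are waiting.
 */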

struct ldsem_waiter {
        struct list_head list;
        struct task_struct *task;
};

static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
{
        return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}

/*
 * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
 * Returns 1 if count was successfully changed; @*old will have @new value.
 * Returns 0 if count was not changed; @*old will have most recent sem->count
 */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
        long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
        if (tmp == *old) {
                *old = new;
                return 1;
        } else {
                *old = tmp;
                return 0;
        }
}
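
/*
 * A minimal sketch of the retry idiom ldsem_cmpxchg() enables (this helper
 * is illustrative only, not part of the ldsem API): because a failed
 * ldsem_cmpxchg() refreshes @*old with the current count, callers can loop
 * on a stale snapshot without re-reading sem->count themselves.
 */
static __maybe_unused int ldsem_example_add_read_bias(struct ld_semaphore *sem)
{
        long count = sem->count;

        while (count >= 0) {
                /* no writer and no waiters: try to become one more reader */
                if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem))
                        return 1;
                /* count was refreshed by the failed cmpxchg; re-test */
        }
        return 0;
}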

/*
 * Initialize an ldsem:
 */
void __init_ldsem(struct ld_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = LDSEM_UNLOCKED;
        sem->wait_readers = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->read_wait);
        INIT_LIST_HEAD(&sem->write_wait);
}
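
/*
 * Usage sketch (hypothetical caller, illustrative only): an ldsem is
 * normally initialized via the init_ldsem() wrapper, assumed here to be the
 * macro in linux/tty.h that supplies a static lock_class_key for lockdep,
 * rather than by calling __init_ldsem() directly.
 */
static __maybe_unused void ldsem_example_init(struct ld_semaphore *sem)
{
        init_ldsem(sem);        /* expands to __init_ldsem(sem, #sem, &key) */
}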

static void __ldsem_wake_readers(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter, *next;
        struct task_struct *tsk;
        long adjust, count;

        /* Try to grant read locks to all readers on the read wait list.
         * Note the 'active part' of the count is incremented by
         * the number of readers before waking any processes up.
         */
        adjust = sem->wait_readers * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS);
        count = ldsem_atomic_update(adjust, sem);
        do {
                if (count > 0)
                        break;
                if (ldsem_cmpxchg(&count, count - adjust, sem))
                        return;
        } while (1);

        list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        }
        INIT_LIST_HEAD(&sem->read_wait);
        sem->wait_readers = 0;
}
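
/*
 * Worked example of the adjustment above, on 64-bit: with two waiting
 * readers,
 *   adjust == 2 * (LDSEM_ACTIVE_BIAS - LDSEM_WAIT_BIAS)
 *          == 2 * 0x100000001
 *          == 0x200000002,
 * which removes two wait biases from the upper half and adds two active
 * readers to the lower half in a single atomic update.
 */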

static inline int writer_trylock(struct ld_semaphore *sem)
{
        /* only wake this writer if the active part of the count can be
         * transitioned from 0 -> 1
         */
        long count = ldsem_atomic_update(LDSEM_ACTIVE_BIAS, sem);
        do {
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS)
                        return 1;
                if (ldsem_cmpxchg(&count, count - LDSEM_ACTIVE_BIAS, sem))
                        return 0;
        } while (1);
}

static void __ldsem_wake_writer(struct ld_semaphore *sem)
{
        struct ldsem_waiter *waiter;

        waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list);
        wake_up_process(waiter->task);
}

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&LDSEM_ACTIVE_MASK) reached 0 (but may
 *     have changed)
 *   - the 'waiting part' of count (&~LDSEM_ACTIVE_MASK) is -ve (and will
 *     still be so)
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 */
static void __ldsem_wake(struct ld_semaphore *sem)
{
        if (!list_empty(&sem->write_wait))
                __ldsem_wake_writer(sem);
        else if (!list_empty(&sem->read_wait))
                __ldsem_wake_readers(sem);
}

static void ldsem_wake(struct ld_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);
        __ldsem_wake(sem);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * wait for the read lock to be granted
 */
static struct ld_semaphore __sched *
down_read_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        struct task_struct *tsk = current;
        long adjust = -LDSEM_ACTIVE_BIAS + LDSEM_WAIT_BIAS;

        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);

        /* Try to reverse the lock attempt but if the count has changed
         * so that reversing fails, check if there are no waiters,
         * and early-out if so (the read lock was just granted)
         */
        do {
                if (ldsem_cmpxchg(&count, count + adjust, sem))
                        break;
                if (count > 0) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->read_wait);
        sem->wait_readers++;

        waiter.task = tsk;
        get_task_struct(tsk);

        /* if there are no active locks, wake the new lock owner(s) */
        if ((count & LDSEM_ACTIVE_MASK) == 0)
                __ldsem_wake(sem);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);

                if (!waiter.task)
                        break;
                if (!timeout)
                        break;
                timeout = schedule_timeout(timeout);
        }

        __set_task_state(tsk, TASK_RUNNING);

        if (!timeout) {
                /* lock timed out but check if this task was just
                 * granted lock ownership - if so, pretend there
                 * was no timeout; otherwise, cleanup lock wait */
                raw_spin_lock_irq(&sem->wait_lock);
                if (waiter.task) {
                        ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
                        list_del(&waiter.list);
                        raw_spin_unlock_irq(&sem->wait_lock);
                        put_task_struct(waiter.task);
                        return NULL;
                }
                raw_spin_unlock_irq(&sem->wait_lock);
        }

        return sem;
}

/*
 * wait for the write lock to be granted
 */
static struct ld_semaphore __sched *
down_write_failed(struct ld_semaphore *sem, long count, long timeout)
{
        struct ldsem_waiter waiter;
        struct task_struct *tsk = current;
        long adjust = -LDSEM_ACTIVE_BIAS;
        int locked = 0;

        /* set up my own style of waitqueue */
        raw_spin_lock_irq(&sem->wait_lock);

        /* Try to reverse the lock attempt but if the count has changed
         * so that reversing fails, check if this task now owns the write
         * lock, and early-out if so
         */
        do {
                if (ldsem_cmpxchg(&count, count + adjust, sem))
                        break;
                if ((count & LDSEM_ACTIVE_MASK) == LDSEM_ACTIVE_BIAS) {
                        raw_spin_unlock_irq(&sem->wait_lock);
                        return sem;
                }
        } while (1);

        list_add_tail(&waiter.list, &sem->write_wait);

        waiter.task = tsk;

        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        for (;;) {
                if (!timeout)
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->wait_lock);
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                locked = writer_trylock(sem);
                if (locked)
                        break;
        }

        if (!locked)
                ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        __set_task_state(tsk, TASK_RUNNING);

        /* lock wait may have timed out */
        if (!locked)
                return NULL;
        return sem;
}


static int __ldsem_down_read_nested(struct ld_semaphore *sem,
                                           int subclass, long timeout)
{
        long count;

        lockdep_acquire_read(sem, subclass, 0, _RET_IP_);

        count = ldsem_atomic_update(LDSEM_READ_BIAS, sem);
        if (count <= 0) {
                lock_stat(sem, contended);
                if (!down_read_failed(sem, count, timeout)) {
                        lockdep_release(sem, 1, _RET_IP_);
                        return 0;
                }
        }
        lock_stat(sem, acquired);
        return 1;
}

static int __ldsem_down_write_nested(struct ld_semaphore *sem,
                                            int subclass, long timeout)
{
        long count;

        lockdep_acquire(sem, subclass, 0, _RET_IP_);

        count = ldsem_atomic_update(LDSEM_WRITE_BIAS, sem);
        if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) {
                lock_stat(sem, contended);
                if (!down_write_failed(sem, count, timeout)) {
                        lockdep_release(sem, 1, _RET_IP_);
                        return 0;
                }
        }
        lock_stat(sem, acquired);
        return 1;
}


/*
 * lock for reading -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_read(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, 0, timeout);
}
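
/*
 * A minimal usage sketch (hypothetical caller, illustrative only): take the
 * read lock with a finite timeout and fail gracefully if it cannot be
 * acquired. The tty core takes tty->ldisc_sem this way, with timeouts such
 * as 5 * HZ or MAX_SCHEDULE_TIMEOUT.
 */
static __maybe_unused int ldsem_example_reader(struct ld_semaphore *sem)
{
        if (!ldsem_down_read(sem, 5 * HZ))
                return -ETIMEDOUT;      /* lock is NOT held on failure */

        /* ... read-side critical section ... */

        ldsem_up_read(sem);
        return 0;
}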

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int ldsem_down_read_trylock(struct ld_semaphore *sem)
{
        long count = sem->count;

        while (count >= 0) {
                if (ldsem_cmpxchg(&count, count + LDSEM_READ_BIAS, sem)) {
                        lockdep_acquire_read(sem, 0, 1, _RET_IP_);
                        lock_stat(sem, acquired);
                        return 1;
                }
        }
        return 0;
}

/*
 * lock for writing -- returns 1 if successful, 0 if timed out
 */
int __sched ldsem_down_write(struct ld_semaphore *sem, long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, 0, timeout);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int ldsem_down_write_trylock(struct ld_semaphore *sem)
{
        long count = sem->count;

        while ((count & LDSEM_ACTIVE_MASK) == 0) {
                if (ldsem_cmpxchg(&count, count + LDSEM_WRITE_BIAS, sem)) {
                        lockdep_acquire(sem, 0, 1, _RET_IP_);
                        lock_stat(sem, acquired);
                        return 1;
                }
        }
        return 0;
}
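
/*
 * A write-side usage sketch (hypothetical caller, illustrative only): try an
 * opportunistic trylock first, then fall back to a timed blocking acquire.
 * On either success path the lock must later be dropped with ldsem_up_write().
 */
static __maybe_unused int ldsem_example_writer(struct ld_semaphore *sem)
{
        if (!ldsem_down_write_trylock(sem) && !ldsem_down_write(sem, 5 * HZ))
                return -ETIMEDOUT;      /* lock is NOT held on failure */

        /* ... exclusive critical section ... */

        ldsem_up_write(sem);
        return 0;
}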

/*
 * release a read lock
 */
void ldsem_up_read(struct ld_semaphore *sem)
{
        long count;

        lockdep_release(sem, 1, _RET_IP_);

        count = ldsem_atomic_update(-LDSEM_READ_BIAS, sem);
        if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0)
                ldsem_wake(sem);
}

/*
 * release a write lock
 */
void ldsem_up_write(struct ld_semaphore *sem)
{
        long count;

        lockdep_release(sem, 1, _RET_IP_);

        count = ldsem_atomic_update(-LDSEM_WRITE_BIAS, sem);
        if (count < 0)
                ldsem_wake(sem);
}


#ifdef CONFIG_DEBUG_LOCK_ALLOC

int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, long timeout)
{
        might_sleep();
        return __ldsem_down_read_nested(sem, subclass, timeout);
}

int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
                            long timeout)
{
        might_sleep();
        return __ldsem_down_write_nested(sem, subclass, timeout);
}

#endif
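
/*
 * A usage sketch for the nested variants (hypothetical caller, illustrative
 * only; assumes the header maps ldsem_down_write_nested() back to plain
 * ldsem_down_write() when lockdep is disabled): when two ldsems must be
 * write-locked at once, the second acquisition passes a distinct lockdep
 * subclass so lockdep does not report a false self-deadlock.
 */
static __maybe_unused int ldsem_example_lock_pair(struct ld_semaphore *a,
                                                  struct ld_semaphore *b,
                                                  long timeout)
{
        if (!ldsem_down_write(a, timeout))
                return 0;
        if (!ldsem_down_write_nested(b, 1, timeout)) {
                ldsem_up_write(a);
                return 0;
        }
        return 1;       /* both write locks held */
}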