linux/lib/rwsem.c
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);
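
/*
 * Illustrative sketch (commentary only, not in the original source): roughly
 * how the slow paths below are reached from the caller-facing rwsem API.  The
 * fast paths live in the per-arch <asm/rwsem.h> (or asm-generic) header and
 * only drop into this file when the atomic count update hits contention.
 * "example_sem" is a hypothetical lock used purely for illustration.
 *
 *      static DECLARE_RWSEM(example_sem);
 *
 *      down_read(&example_sem);        // contended -> rwsem_down_read_failed()
 *      ...                             // read-side critical section
 *      up_read(&example_sem);          // last reader, waiters queued -> rwsem_wake()
 *
 *      down_write(&example_sem);       // contended -> rwsem_down_write_failed()
 *      downgrade_write(&example_sem);  // waiters queued -> rwsem_downgrade_wake()
 *      up_read(&example_sem);          // release the downgraded (read) lock
 */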

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};
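
/*
 * Note on the count encoding (the exact values are arch-dependent; the
 * figures below assume the common 32-bit layout from <asm/rwsem.h> and are
 * given for illustration only):
 *
 *      RWSEM_UNLOCKED_VALUE    0x00000000   RWSEM_ACTIVE_MASK  0x0000ffff
 *      RWSEM_ACTIVE_READ_BIAS  0x00000001
 *      RWSEM_WAITING_BIAS      0xffff0000   (negative)
 *      RWSEM_ACTIVE_WRITE_BIAS 0xffff0001   (waiting + active)
 *
 * Worked examples:
 *      0x00000000  unlocked
 *      0x00000003  three active readers, nobody queued
 *      0xffff0000  no active lockers, waiters queued
 *      0xffff0001  one active writer, or one active reader plus queued
 *                  waiters (the write bias is deliberately both at once)
 */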

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken readers are removed from the list after having their task zeroed;
 *   a woken writer is left queued and dequeues itself once it takes the lock
 * - writers are only woken if wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        long oldcount, woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY)
                        /* Wake writer at the front of the queue, but do not
                         * grant it the lock yet as we want other writers
                         * to be able to steal it.  Readers, on the other hand,
                         * will block as they will notice the queued writer.
                         */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        adjustment = 0;
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
                        if (rwsem_atomic_update(-adjustment, sem) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
        }

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue.  Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                        struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        if (adjustment)
                rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        loop = woken;
        do {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
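                /*
                 * Note: the barrier below orders the reads of
                 * waiter->list.next and waiter->task above against the
                 * NULLing of waiter->task.  Once task is NULL the woken
                 * reader may return from rwsem_down_read_failed() and the
                 * waiter (which lives on its stack) may cease to exist.
                 */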
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        } while (--loop);

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
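        /*
         * Pin the task: once the waker in __rwsem_do_wake() has cleared
         * waiter.task we may return at any moment, so it needs its own
         * reference for the subsequent wake_up_process()/put_task_struct().
         */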
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
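        /*
         * In count terms (note RWSEM_WAITING_BIAS is negative):
         * count == RWSEM_WAITING_BIAS means no lockers are active at all;
         * count > RWSEM_WAITING_BIAS while we contributed the waiting bias
         * (adjustment != -RWSEM_ACTIVE_READ_BIAS) means we queued first and
         * only readers hold the lock, so wake ourselves straight away.
         */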
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
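        /*
         * __rwsem_do_wake() hands us the read lock and then clears
         * waiter.task; until we observe that, keep going back to sleep.
         */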
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        tsk->state = TASK_RUNNING;

        return sem;
}

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
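        /*
         * Note: unlike the read path, no get_task_struct() is needed here.
         * The waker only touches a writer's waiter while holding
         * sem->wait_lock, and we cannot dequeue ourselves and return without
         * taking that lock first, so the task cannot go away under the waker.
         */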

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there were already threads queued before us and there are no
         * active writers, the lock must be read owned; so we try to wake
         * any read locks that were queued ahead of us. */
        if (count > RWSEM_WAITING_BIAS &&
            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

        /* wait until we successfully acquire the lock */
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (true) {
                if (!(count & RWSEM_ACTIVE_MASK)) {
                        /* Try acquiring the write lock. */
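                        /*
                         * count == RWSEM_WAITING_BIAS means waiters are
                         * queued but nobody holds the lock.  Install the
                         * write bias, keeping the waiting bias if other
                         * waiters remain queued behind us; if the cmpxchg
                         * fails, someone grabbed the lock first and we sleep
                         * until there are no active lockers again.
                         */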
                        count = RWSEM_ACTIVE_WRITE_BIAS;
                        if (!list_is_singular(&sem->wait_list))
                                count += RWSEM_WAITING_BIAS;

                        if (sem->count == RWSEM_WAITING_BIAS &&
                            cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
                                                        RWSEM_WAITING_BIAS)
                                break;
                }

                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        schedule();
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        tsk->state = TASK_RUNNING;

        return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);