linux/kernel/locking/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

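/*
 * Lock state is tracked in sem->count, always under sem->wait_lock:
 *    0  - the semaphore is not held
 *   >0  - that many readers hold it
 *   -1  - a single writer holds it
 */
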
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

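        /* if the wait_lock itself is contended, conservatively report the rwsem as locked */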
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiters are removed from the list after their 'task' field has
 *   been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant read locks to all of the readers at the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Make sure we do not wake up the next reader before
                 * setting the nil condition that grants it the lock;
                 * otherwise we could miss the wakeup on the other
                 * side and end up sleeping again. See the pairing
                 * check of waiter.task in __down_read().
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->count += woken;

 out:
        return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
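        /*
         * As in __rwsem_do_wake(), the lock is not granted here: the woken
         * writer must claim it for itself when it runs.
         */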
        wake_up_process(waiter->task);

        return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
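        /* the waker drops this task reference in __rwsem_do_wake() */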
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
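                /* the waker clears waiter.task (after an smp_mb()) once we hold the lock */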
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        __set_task_state(tsk, TASK_RUNNING);
 out:
        ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * get a write lock on the semaphore
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to write lock stealing: a task that is
                 * already on a CPU can take the lock as soon as it is
                 * released, rather than going to sleep and waiting for the
                 * system to wake it (or whoever is at the head of the wait
                 * list) up.
                 */
                if (sem->count == 0)
                        break;
                if (signal_pending_state(state, current)) {
                        ret = -EINTR;
                        goto out;
                }
                set_task_state(tsk, state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->count = -1;
out:
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

int __sched __down_write_killable(struct rw_semaphore *sem)
{
        return __down_write_common(sem, TASK_KILLABLE);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count == 0) {
                /* got the lock */
                sem->count = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

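        /*
         * If the list is non-empty here, its head can only be a writer:
         * readers at the head would already have been granted the lock,
         * so waking a single writer is sufficient.
         */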
        if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

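        /*
         * Drop the lock, then pass it on: readers at the head of the queue
         * are granted it directly, while a waiting writer is only woken and
         * must take it for itself.
         */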
        sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

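        /*
         * count = 1: we keep a read lock ourselves; any readers woken by
         * __rwsem_do_wake() below add their own count on top of it.
         */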
        sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}