linux/lib/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

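/*
 * Lock-state convention used throughout this file: sem->activity is 0 when
 * the semaphore is unlocked, a positive count of the readers currently
 * holding it, or -1 when a writer holds it.  All updates to ->activity and
 * ->wait_list are serialised by ->wait_lock.
 */
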
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

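/*
 * Best-effort lock-state query: if ->wait_lock cannot be taken, the
 * semaphore is being manipulated by another CPU, so it is conservatively
 * reported as locked.
 */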
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

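/*
 * Illustrative usage sketch (not part of this file's API): callers normally
 * go through the generic rwsem interface declared in <linux/rwsem.h> rather
 * than the __-prefixed internals below, e.g.:
 *
 *      static DECLARE_RWSEM(my_sem);
 *
 *      down_read(&my_sem);     -- shared; any number of readers may hold it
 *      ...
 *      up_read(&my_sem);
 *
 *      down_write(&my_sem);    -- exclusive; excludes readers and writers
 *      ...
 *      up_write(&my_sem);
 */
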
/*
 * handle the lock being released when there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken waiter blocks are removed from the list after their ->task pointer
 *   has been cleared
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Make sure we do not wake up the next reader before
                 * clearing ->task, which is the condition that grants
                 * it the lock; otherwise we could miss the wakeup on
                 * the other side and end up sleeping again.  See the
                 * pairing in the wait loop of __down_read().
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->activity += woken;

 out:
        return sem;
}

/*
 * wake a single writer
 */
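/*
 * Note that the writer is only woken, not granted the lock: it re-takes
 * ->wait_lock and re-checks ->activity itself in __down_write_nested(),
 * which is what makes write lock stealing possible.
 */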
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

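        /* fast path: no writer holds the lock (->activity >= 0) and nobody
         * is queued ahead of us, so the read lock can be granted at once */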
        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
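        /* hold a reference on the task: once the waker clears waiter.task we
         * may return and even exit before it calls wake_up_process(), so it
         * keeps the task_struct alive and drops it with put_task_struct() */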
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        __set_task_state(tsk, TASK_RUNNING);
 out:
        ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);
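        /* the waiter stays queued while we sleep, so new readers see a
         * non-empty wait_list and cannot take the fast path in
         * __down_read(); the entry is only removed below once the lock has
         * been obtained */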

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to supporting write lock stealing: it
                 * lets the task already on the CPU take the lock straight
                 * away rather than go to sleep and wait for the system to
                 * wake either it or whoever is at the head of the wait
                 * list.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

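        /* if we were the last reader and anyone is queued, the head waiter
         * must be a writer: readers at the front of the queue are always
         * woken in a batch by __rwsem_do_wake(), so a reader can only be
         * left queued behind a writer */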
        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

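        /* drop the write hold, then wake whatever is at the front of the
         * queue: either a single writer or a batch of readers */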
        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

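        /* go from write-held (-1) to a single read hold (ourselves); any
         * readers woken below will bump the count further */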
        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}