linux/kernel/wait.c
/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void init_waitqueue_head(wait_queue_head_t *q)
{
        spin_lock_init(&q->lock);
        INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(init_waitqueue_head);

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
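
/*
 * Example: the canonical open-coded wait loop built on
 * prepare_to_wait()/finish_wait(). This is an illustrative sketch,
 * not code from this file; my_waitqueue and my_condition are
 * hypothetical names.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_waitqueue, &wait, TASK_INTERRUPTIBLE);
 *              if (my_condition)
 *                      break;
 *              if (signal_pending(current))
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_waitqueue, &wait);
 *
 * The condition test cannot miss a wakeup: set_current_state() in
 * prepare_to_wait() provides the barrier described above, so either
 * this task sees the condition set, or the waker sees it on the queue.
 */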

void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        /*
         * don't alter the task state if this is just going to
         * queue an async wait queue callback
         */
        if (is_sync_wait(wait))
                set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
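
/*
 * Example: exclusive waiters queue at the tail with WQ_FLAG_EXCLUSIVE
 * set, so __wake_up() stops after waking one of them. With many tasks
 * parked via prepare_to_wait_exclusive(), a plain wake_up() wakes at
 * most one exclusive waiter (any non-exclusive waiters, which sit
 * ahead of them in the list, are all still woken), avoiding a
 * thundering herd. Illustrative sketch; accept_queue is a
 * hypothetical name.
 *
 *      prepare_to_wait_exclusive(&accept_queue, &wait,
 *                                TASK_INTERRUPTIBLE);
 *      ...
 *      wake_up(&accept_queue);         wakes a single exclusive waiter
 */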

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area).
         * and
         *  - all other users take the lock (i.e. we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
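
/*
 * Example: autoremove_wake_function is the callback that DEFINE_WAIT()
 * installs, so the waker dequeues the woken task itself. As a result,
 * finish_wait() usually finds the entry already empty and can take its
 * lockless fast path. Illustrative sketch; foo_queue and foo_ready are
 * hypothetical names.
 *
 *      DEFINE_WAIT(wait);              uses autoremove_wake_function
 *
 *      prepare_to_wait(&foo_queue, &wait, TASK_UNINTERRUPTIBLE);
 *      if (!foo_ready)
 *              schedule();
 *      finish_wait(&foo_queue, &wait);
 */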

/*
 * Wake callback for bit waitqueues: wake a waiter only if its key
 * matches this (word, bit) pair and the bit has actually been cleared.
 */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes. A nonzero return
 * halts the wait and is propagated back to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);
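
/*
 * Example: a minimal "action" callback. It runs after prepare_to_wait()
 * has already set the task state, so it only needs to schedule; a
 * nonzero return (here, on a pending signal) aborts the wait and is
 * handed back to the caller. Illustrative sketch; my_bit_wait, flags
 * and MY_BIT are hypothetical names.
 *
 *      static int my_bit_wait(void *word)
 *      {
 *              if (signal_pending(current))
 *                      return -EINTR;
 *              schedule();
 *              return 0;
 *      }
 *
 *      err = out_of_line_wait_on_bit(&flags, MY_BIT, my_bit_wait,
 *                                    TASK_INTERRUPTIBLE);
 */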

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags)) {
                        if ((ret = (*action)(q->key.flags)))
                                break;
                }
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be issued prior to
 * calling this. Typically, this will be smp_mb__after_clear_bit(), but
 * in some cases where bitflags are manipulated non-atomically under a
 * lock, one may need to use a less regular barrier, such as fs/inode.c's
 * smp_mb(), because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
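
/*
 * Example: the waker pairs the bit clear with a barrier before calling
 * wake_up_bit(), as the comment above requires. Illustrative sketch;
 * flags and MY_BIT are hypothetical names.
 *
 *      clear_bit(MY_BIT, &flags);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&flags, MY_BIT);
 *
 * Without the barrier, the waitqueue_active() test in __wake_up_bit()
 * could be reordered before the store that clears the bit, and a
 * concurrent waiter could be missed.
 */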

/*
 * Every (word, bit) pair hashes to a slot in the per-zone wait table.
 * The word address is shifted left by log2(BITS_PER_LONG) before the
 * bit number is OR'ed in, so different bits of the same word map to
 * distinct hash inputs.
 */
fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);