linux/kernel/sched/wait.c
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/kthread.h>

void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&wq_head->lock);
	lockdep_set_class_and_name(&wq_head->lock, key, name);
	INIT_LIST_HEAD(&wq_head->head);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
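
/*
 * Illustrative sketch (not part of this file): open-coded waiting with
 * add_wait_queue()/remove_wait_queue(). "my_wq" and "done" are made-up
 * names for this example; real callers usually use the wait_event*()
 * macros instead.
 *
 *	DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	DEFINE_WAIT_FUNC(wait, default_wake_function);
 *
 *	add_wait_queue(&my_wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (done)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&my_wq, &wait);
 */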

void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	__add_wait_queue_entry_tail(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__remove_wait_queue(wq_head, wq_entry);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Scan threshold to break the wait queue walk.
 * This allows a waker to take a break from holding the
 * wait queue lock during the wait queue walk.
 */
#define WAITQUEUE_WALK_BREAK_CNT 64

/*
 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake that number of exclusive tasks, and potentially all
 * the non-exclusive tasks. Since exclusive entries are queued at the tail,
 * the non-exclusive tasks are woken first.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key,
			wait_queue_entry_t *bookmark)
{
	wait_queue_entry_t *curr, *next;
	int cnt = 0;

	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
		/* Resume a previously interrupted walk after the bookmark. */
		curr = list_next_entry(bookmark, entry);

		list_del(&bookmark->entry);
		bookmark->flags = 0;
	} else
		curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);

	if (&curr->entry == &wq_head->head)
		return nr_exclusive;

	list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {
		unsigned flags = curr->flags;
		int ret;

		/* Skip bookmarks left behind by other walkers. */
		if (flags & WQ_FLAG_BOOKMARK)
			continue;

		ret = curr->func(curr, mode, wake_flags, key);
		if (ret < 0)
			break;
		if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;

		/*
		 * After WAITQUEUE_WALK_BREAK_CNT wakeups, park the bookmark
		 * at the current position so the caller can drop the lock
		 * and resume the walk later.
		 */
		if (bookmark && (++cnt > WAITQUEUE_WALK_BREAK_CNT) &&
				(&next->entry != &wq_head->head)) {
			bookmark->flags = WQ_FLAG_BOOKMARK;
			list_add_tail(&bookmark->entry, &next->entry);
			break;
		}
	}
	return nr_exclusive;
}

static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, int wake_flags, void *key)
{
	unsigned long flags;
	wait_queue_entry_t bookmark;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&wq_head->lock, flags);
	nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags, key, &bookmark);
	spin_unlock_irqrestore(&wq_head->lock, flags);

	/*
	 * If the walk was cut short, the bookmark is still queued: keep
	 * resuming from it, re-taking the lock for each batch.
	 */
	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		spin_lock_irqsave(&wq_head->lock, flags);
		nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
						wake_flags, key, &bookmark);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: is directly passed to the wakeup function
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	__wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
}
EXPORT_SYMBOL(__wake_up);
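
/*
 * The common wake_up*() macros in <linux/wait.h> resolve to this function,
 * e.g. (copied here for reference):
 *
 *	#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 *	#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
 *	#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
 */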

/*
 * Same as __wake_up() but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr)
{
	__wake_up_common(wq_head, mode, nr, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked);

void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key)
{
	__wake_up_common(wq_head, mode, 1, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key);

void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark)
{
	__wake_up_common(wq_head, mode, 1, 0, key, bookmark);
}
EXPORT_SYMBOL_GPL(__wake_up_locked_key_bookmark);
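
/*
 * Sketch of a bookmark-driven walk from the caller's side (it mirrors what
 * __wake_up_common_lock() above does; "mode" and "key" stand for whatever
 * the caller would pass):
 *
 *	wait_queue_entry_t bookmark;
 *	unsigned long flags;
 *
 *	bookmark.flags = 0;
 *	bookmark.private = NULL;
 *	bookmark.func = NULL;
 *	INIT_LIST_HEAD(&bookmark.entry);
 *
 *	do {
 *		spin_lock_irqsave(&wq_head->lock, flags);
 *		__wake_up_locked_key_bookmark(wq_head, mode, key, &bookmark);
 *		spin_unlock_irqrestore(&wq_head->lock, flags);
 *	} while (bookmark.flags & WQ_FLAG_BOOKMARK);
 */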

/**
 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
 * @wq_head: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 * @key: opaque value to be passed to wakeup targets
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 *
 * On UP it can prevent extra preemption.
 *
 * It may be assumed that this function implies a write memory barrier before
 * changing the task state if and only if any tasks are woken up.
 */
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
			int nr_exclusive, void *key)
{
	int wake_flags = 1; /* XXX WF_SYNC */

	if (unlikely(!wq_head))
		return;

	if (unlikely(nr_exclusive != 1))
		wake_flags = 0;

	__wake_up_common_lock(wq_head, mode, nr_exclusive, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);

/*
 * __wake_up_sync - see __wake_up_sync_key()
 */
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr_exclusive)
{
	__wake_up_sync_key(wq_head, mode, nr_exclusive, NULL);
}
EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
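
/*
 * For reference, the sync variants are normally reached through macros in
 * <linux/wait.h>, e.g.:
 *
 *	#define wake_up_interruptible_sync(x) \
 *		__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
 */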

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
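
/*
 * Typical open-coded wait loop built on prepare_to_wait()/finish_wait()
 * (illustrative sketch; "my_wq" and "condition" are made-up names):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */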

void
prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;

	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&wq_head->lock, flags);
	if (list_empty(&wq_entry->entry))
		__add_wait_queue_entry_tail(wq_head, wq_entry);
	set_current_state(state);
	spin_unlock_irqrestore(&wq_head->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
{
	wq_entry->flags = flags;
	wq_entry->private = current;
	wq_entry->func = autoremove_wake_function;
	INIT_LIST_HEAD(&wq_entry->entry);
}
EXPORT_SYMBOL(init_wait_entry);

long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * An exclusive waiter must not fail if it was selected by
		 * wakeup; it should "consume" the condition we were waiting
		 * for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up; we cannot miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us; it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->entry);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->entry)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
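
/*
 * Simplified sketch of how the wait_event*() macros drive this helper;
 * the real ___wait_event() in <linux/wait.h> is more general ("condition"
 * stands for the caller's wait condition):
 *
 *	long err;
 *
 *	for (;;) {
 *		err = prepare_to_wait_event(&wq_head, &wq_entry, state);
 *		if (condition)
 *			break;
 *		if (err)	// -ERESTARTSYS from a pending signal
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq_head, &wq_entry);
 */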

/*
 * Note! These two wait functions are entered with the
 * wait-queue lock held (and interrupts off in the _irq
 * case), so there is no race with testing the wakeup
 * condition in the caller before they add the wait
 * entry to the wake queue.
 */
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock(&wq->lock);
	schedule();
	spin_lock(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr);

int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
{
	if (likely(list_empty(&wait->entry)))
		__add_wait_queue_entry_tail(wq, wait);

	set_current_state(TASK_INTERRUPTIBLE);
	if (signal_pending(current))
		return -ERESTARTSYS;

	spin_unlock_irq(&wq->lock);
	schedule();
	spin_lock_irq(&wq->lock);
	return 0;
}
EXPORT_SYMBOL(do_wait_intr_irq);
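
/*
 * Sketch of the caller-side loop, modelled on
 * __wait_event_interruptible_locked() in <linux/wait.h>; the caller holds
 * wq.lock on entry (with interrupts disabled for the _irq variant), and
 * "condition" is the caller's wait condition:
 *
 *	DEFINE_WAIT(wait);
 *	int err;
 *
 *	do {
 *		err = do_wait_intr_irq(&wq, &wait);
 *		if (err)
 *			break;
 *	} while (!condition);
 *	__remove_wait_queue(&wq, &wait);
 *	__set_current_state(TASK_RUNNING);
 */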

/**
 * finish_wait - clean up after waiting in a queue
 * @wq_head: waitqueue waited on
 * @wq_entry: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area),
	 * and
	 *  - all other users take the lock (i.e. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wq_entry->entry)) {
		spin_lock_irqsave(&wq_head->lock, flags);
		list_del_init(&wq_entry->entry);
		spin_unlock_irqrestore(&wq_head->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wq_entry, mode, sync, key);

	if (ret)
		list_del_init(&wq_entry->entry);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
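
/*
 * For reference: DEFINE_WAIT(name) in <linux/wait.h> is
 * DEFINE_WAIT_FUNC(name, autoremove_wake_function), so a woken waiter
 * unlinks itself and a later wake_up() will not see a stale entry; only
 * finish_wait()'s cheap list_empty_careful() check remains.
 */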

static inline bool is_kthread_should_stop(void)
{
	return (current->flags & PF_KTHREAD) && kthread_should_stop();
}

/*
 * DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 * add_wait_queue(&wq_head, &wait);
 * for (;;) {
 *     if (condition)
 *         break;
 *
 *     // waiter: wait_woken()			// waker: woken_wake_function()
 *
 *     p->state = mode;				condition = true;
 *     smp_mb(); // A				smp_wmb(); // C
 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))	wq_entry->flags |= WQ_FLAG_WOKEN;
 *         schedule()				try_to_wake_up();
 *     p->state = TASK_RUNNING;			~~~~~~~~~~~~~~~~~~
 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;	condition = true;
 *     smp_mb(); // B				smp_wmb(); // C
 *						wq_entry->flags |= WQ_FLAG_WOKEN;
 * }
 * remove_wait_queue(&wq_head, &wait);
 */
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
{
	set_current_state(mode); /* A */
	/*
	 * The above implies an smp_mb(), which matches with the smp_wmb() from
	 * woken_wake_function() such that if we observe WQ_FLAG_WOKEN we must
	 * also observe all state before the wakeup.
	 */
	if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);

	/*
	 * The below implies an smp_mb(), it too pairs with the smp_wmb() from
	 * woken_wake_function() such that we must either observe the wait
	 * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
	 * an event.
	 */
	smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
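
/*
 * Typical wait_woken() usage (sketch, modelled on networking receive
 * paths; "sk", "condition" and "timeo" are illustrative):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	while (!condition && timeo)
 *		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */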

int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * doesn't imply a write barrier and the users of wakeup functions
	 * expect write-barrier semantics.  The following smp_wmb() is
	 * equivalent to the smp_wmb() in try_to_wake_up() and is paired
	 * with the smp_store_mb() in wait_woken().
	 */
	smp_wmb(); /* C */
	wq_entry->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wq_entry, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);