linux/kernel/sched/swait.c
#include <linux/sched.h>
#include <linux/swait.h>

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
                             struct lock_class_key *key)
{
        raw_spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

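/*
 * Illustrative sketch (not part of this file): callers normally go through
 * the helpers in <linux/swait.h> rather than calling this directly, e.g.:
 *
 *        static DECLARE_SWAIT_QUEUE_HEAD(my_swait);
 *
 * or, for an embedded / dynamically allocated head:
 *
 *        struct swait_queue_head q;
 *
 *        init_swait_queue_head(&q);
 *
 * init_swait_queue_head() supplies the name and lockdep class key that
 * __init_swait_queue_head() expects.
 */
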
/*
 * The thing about the wake_up_state() return value; I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
        struct swait_queue *curr;

        if (list_empty(&q->task_list))
                return;

        curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
        wake_up_process(curr->task);
        list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up(struct swait_queue_head *q)
{
        unsigned long flags;

        if (!swait_active(q))
                return;

        raw_spin_lock_irqsave(&q->lock, flags);
        swake_up_locked(q);
        raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

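/*
 * Waker-side sketch (illustrative only; 'done' and 'q' are hypothetical):
 *
 *        done = true;
 *        swake_up(&q);
 *
 * swake_up() only takes q->lock when a waiter appears to be queued, which
 * keeps the common no-waiter case cheap.
 */
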
/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
        struct swait_queue *curr;
        LIST_HEAD(tmp);

        if (!swait_active(q))
                return;

        raw_spin_lock_irq(&q->lock);
        list_splice_init(&q->task_list, &tmp);
        while (!list_empty(&tmp)) {
                curr = list_first_entry(&tmp, typeof(*curr), task_list);

                wake_up_state(curr->task, TASK_NORMAL);
                list_del_init(&curr->task_list);

                if (list_empty(&tmp))
                        break;

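                /*
                 * Drop and re-take the lock between wakeups so IRQs are not
                 * kept disabled for the whole walk (see the comment above);
                 * entries on @tmp remain protected by q->lock, since woken
                 * tasks may remove themselves concurrently via finish_swait().
                 */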
                raw_spin_unlock_irq(&q->lock);
                raw_spin_lock_irq(&q->lock);
        }
        raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

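/*
 * Queue @wait on @q for the current task.  __prepare_to_swait() requires
 * q->lock to be held by the caller; prepare_to_swait() takes the lock itself
 * and additionally sets the task state, so a subsequent schedule() will sleep
 * unless a wakeup has already happened.
 */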
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
        wait->task = current;
        if (list_empty(&wait->task_list))
                list_add(&wait->task_list, &q->task_list);
}

void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&q->lock, flags);
        __prepare_to_swait(q, wait);
        set_current_state(state);
        raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

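/*
 * Signal-aware variant used by the swait_event*() wait loops: bail out with
 * -ERESTARTSYS instead of queueing when a signal is already pending for the
 * given @state.
 */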
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        prepare_to_swait(q, wait, state);

        return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

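/*
 * Undo prepare_to_swait(): restore TASK_RUNNING and dequeue @wait if a wakeup
 * has not already removed it.  finish_swait() uses list_empty_careful() so
 * the lock is only taken when the entry still appears to be queued.
 */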
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
        __set_current_state(TASK_RUNNING);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);

        if (!list_empty_careful(&wait->task_list)) {
                raw_spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                raw_spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_swait);

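/*
 * Putting it together -- an open-coded wait loop (illustrative sketch; 'q',
 * 'wait' and 'condition' are hypothetical, and real users normally rely on
 * the swait_event*() macros from <linux/swait.h>, which expand to roughly
 * this pattern):
 *
 *        DECLARE_SWAITQUEUE(wait);
 *
 *        for (;;) {
 *                prepare_to_swait(&q, &wait, TASK_UNINTERRUPTIBLE);
 *                if (condition)
 *                        break;
 *                schedule();
 *        }
 *        finish_swait(&q, &wait);
 */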