1
2
3
4
5
6#include <linux/init.h>
7#include <linux/export.h>
8#include <linux/sched.h>
9#include <linux/mm.h>
10#include <linux/wait.h>
11#include <linux/hash.h>
12#include <linux/kthread.h>
13
/*
 * Initialise a wait queue head: set up its internal spinlock, register
 * the caller-supplied lockdep class/name for that lock, and start with
 * an empty waiter list.
 */
void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	/* lockdep class is assigned after the lock itself is initialised */
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);
22
/*
 * Add a non-exclusive waiter to @q.  WQ_FLAG_EXCLUSIVE is cleared so the
 * wakeup code treats this entry as non-exclusive; the entry is queued via
 * __add_wait_queue() (contrast add_wait_queue_exclusive(), which uses the
 * _tail variant).  The queue lock is taken irq-safe.
 */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);
33
/*
 * Add an exclusive waiter to @q.  WQ_FLAG_EXCLUSIVE marks the entry for
 * exclusive-wakeup handling, and the entry is queued at the tail so that
 * non-exclusive waiters (added at the head) are reached first by wakers.
 */
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);
44
/*
 * Remove @wait from @q under the queue lock.  Safe to call whether or
 * not a wakeup already dequeued the entry, provided the entry was
 * initialised (list_del on an empty/self-linked entry is harmless
 * only if __remove_wait_queue tolerates it — callers normally pair
 * this with add_wait_queue()).
 */
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
54
55
56
57
58
59
60
61
62
63
64
65
66
67
/*
 * prepare_to_wait - queue a non-exclusive waiter and set the task state.
 *
 * The entry is added only if not already queued (list_empty() check), so
 * callers may invoke this repeatedly from a wait loop.  The task state is
 * changed inside the locked region, after the queueing: set_current_state()
 * implies a memory barrier, so a waker that sees the queue entry is
 * guaranteed to either see the new task state or have its wakeup observed
 * by this task's subsequent condition re-test.
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;	/* non-exclusive waiter */
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);
81
/*
 * prepare_to_wait_exclusive - queue an exclusive waiter and set the task
 * state.  Same contract as prepare_to_wait(), but the entry is flagged
 * WQ_FLAG_EXCLUSIVE and added at the tail so exclusive waiters queue up
 * behind non-exclusive ones.
 */
void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);	/* implies a barrier, see prepare_to_wait() */
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);
95
96
97
98
99
100
101
102
103
104
/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets the current thread back to running state and removes the wait
 * descriptor from the given waitqueue if it is still queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-done updates in progress that would
	 *    result in us returning with a truncated list; and
	 *  - all list manipulation is done with the queue lock
	 *    held, and list_del_init() only ever resets the
	 *    entry to the self-linked "empty" state.
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets the current thread back to running state and removes the wait
 * descriptor from the given waitqueue if still queued.
 *
 * If the entry is already gone (a wakeup dequeued us concurrently),
 * pass that wakeup on to the next waiter via __wake_up_locked_key().
 * This prevents waiter starvation where an exclusive waiter aborts
 * and the wakeup it consumed would otherwise be lost.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
	else if (waitqueue_active(q))
		__wake_up_locked_key(q, mode, key);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);
163
/*
 * Wake function that removes its waitqueue entry when the wakeup
 * actually succeeded (default_wake_function() returned non-zero).
 * list_del_init() leaves the entry self-linked, which finish_wait()
 * treats as "not queued", so the woken task can usually skip taking
 * the queue lock again.
 */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
173
174static inline bool is_kthread_should_stop(void)
175{
176 return (current->flags & PF_KTHREAD) && kthread_should_stop();
177}
178
179static int
180var_wake_function(wait_queue_t *wq_entry, unsigned int mode,
181 int sync, void *arg)
182{
183 struct wait_bit_key *key = arg;
184 struct wait_bit_queue *wbq_entry =
185 container_of(wq_entry, struct wait_bit_queue, wait);
186
187 if (wbq_entry->key.flags != key->flags ||
188 wbq_entry->key.bit_nr != key->bit_nr)
189 return 0;
190
191 return autoremove_wake_function(wq_entry, mode, sync, key);
192}
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
/*
 * wait_woken - sleep until woken_wake_function() marks us woken (or the
 * timeout expires, or a kthread stop is requested).  Returns the
 * remaining timeout.  Must be used together with woken_wake_function();
 * the WQ_FLAG_WOKEN handshake closes the race between testing the wait
 * condition and going to sleep.
 */
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
{
	set_current_state(mode);
	/*
	 * The above implies a full memory barrier, which pairs with the
	 * smp_wmb() in woken_wake_function(): if we observe WQ_FLAG_WOKEN
	 * here we must also observe all state written before the wakeup.
	 */
	if (!(wait->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop())
		timeout = schedule_timeout(timeout);
	__set_current_state(TASK_RUNNING);
	/*
	 * smp_store_mb() implies a full barrier too; it also pairs with the
	 * smp_wmb() in woken_wake_function() so that on return we either
	 * observe the wait condition being true _or_ we have consumed
	 * WQ_FLAG_WOKEN — either way no wakeup is lost for the next cycle.
	 */
	smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN);

	return timeout;
}
EXPORT_SYMBOL(wait_woken);
237
/*
 * Wake function counterpart of wait_woken(): set WQ_FLAG_WOKEN so a
 * concurrent wait_woken() caller does not go (back) to sleep, then
 * perform the default wakeup.
 */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	/*
	 * Although this function is called under the waitqueue lock, LOCK
	 * does not imply a write barrier and callers expect write-barrier
	 * semantics from wakeup functions.  This smp_wmb() pairs with the
	 * barriers (set_current_state()/smp_store_mb()) in wait_woken().
	 */
	smp_wmb();
	wait->flags |= WQ_FLAG_WOKEN;

	return default_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(woken_wake_function);
253
254
255
256
257
258
259int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
260{
261 struct wait_bit_key_deprecated *key = arg;
262 struct wait_bit_queue_deprecated *wait_bit
263 = container_of(wait, struct wait_bit_queue_deprecated, wait);
264
265 if (wait_bit->key.flags != key->flags ||
266 wait_bit->key.bit_nr != key->bit_nr ||
267 test_bit(key->bit_nr, key->flags))
268 return 0;
269 else
270 return autoremove_wake_function(wait, mode, sync, key);
271}
272EXPORT_SYMBOL(wake_bit_function);
273
274int wake_bit_function_rh(wait_queue_t *wait, unsigned mode, int sync, void *arg)
275{
276 struct wait_bit_key *key = arg;
277 struct wait_bit_queue *wait_bit
278 = container_of(wait, struct wait_bit_queue, wait);
279
280 if (wait_bit->key.flags != key->flags ||
281 wait_bit->key.bit_nr != key->bit_nr ||
282 test_bit(key->bit_nr, key->flags))
283 return 0;
284 else
285 return autoremove_wake_function(wait, mode, sync, key);
286}
287EXPORT_SYMBOL(wake_bit_function_rh);
288
289
290
291
292
293
294int __sched
295__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
296 wait_bit_action_f *action, unsigned mode)
297{
298 int ret = 0;
299
300 do {
301 prepare_to_wait(wq, &q->wait, mode);
302 if (test_bit(q->key.bit_nr, q->key.flags))
303 ret = (*action)(&q->key, mode);
304 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
305 finish_wait(wq, &q->wait);
306 return ret;
307}
308EXPORT_SYMBOL(__wait_on_bit);
309
310int __sched out_of_line_wait_on_bit(void *word, int bit,
311 wait_bit_action_f *action, unsigned mode)
312{
313 wait_queue_head_t *wq = bit_waitqueue(word, bit);
314 DEFINE_WAIT_BIT(wait, word, bit);
315
316 return __wait_on_bit(wq, &wait, action, mode);
317}
318EXPORT_SYMBOL(out_of_line_wait_on_bit);
319
320int __sched out_of_line_wait_on_bit_timeout(
321 void *word, int bit, wait_bit_action_f *action,
322 unsigned mode, unsigned long timeout)
323{
324 wait_queue_head_t *wq = bit_waitqueue(word, bit);
325 DEFINE_WAIT_BIT(wait, word, bit);
326
327 wait.key.timeout = jiffies + timeout;
328 return __wait_on_bit(wq, &wait, action, mode);
329}
330EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
331
/*
 * Wait for a bit to clear and then atomically set it (acquire a
 * bit-lock).  We wait as an exclusive waiter so each release wakes at
 * most one contender.  If @action fails (e.g. a signal), we must abort
 * via abort_exclusive_wait() so a wakeup that may have been delivered
 * to us concurrently is handed to the next exclusive waiter rather
 * than lost.  Returns 0 once the bit was acquired, else @action's error.
 */
int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			wait_bit_action_f *action, unsigned mode)
{
	do {
		int ret;

		prepare_to_wait_exclusive(wq, &q->wait, mode);
		/* bit already clear: try to grab it in the loop condition */
		if (!test_bit(q->key.bit_nr, q->key.flags))
			continue;
		/* bit set: sleep/poll via the caller-supplied action */
		ret = action(&q->key, mode);
		if (!ret)
			continue;
		/* action failed: pass any consumed wakeup to the next waiter */
		abort_exclusive_wait(wq, &q->wait, mode, &q->key);
		return ret;
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);
352
353int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
354 wait_bit_action_f *action, unsigned mode)
355{
356 wait_queue_head_t *wq = bit_waitqueue(word, bit);
357 DEFINE_WAIT_BIT(wait, word, bit);
358
359 return __wait_on_bit_lock(wq, &wait, action, mode);
360}
361EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
362
363void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
364{
365 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
366 if (waitqueue_active(wq))
367 __wake_up(wq, TASK_NORMAL, 1, &key);
368}
369EXPORT_SYMBOL(__wake_up_bit);
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388void wake_up_bit(void *word, int bit)
389{
390 __wake_up_bit(bit_waitqueue(word, bit), word, bit);
391}
392EXPORT_SYMBOL(wake_up_bit);
393
#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
/* Hashed table of waitqueue heads used by __var_waitqueue()/wake_up_var(). */
static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
397
398wait_queue_head_t *bit_waitqueue(void *word, int bit)
399{
400 const int shift = BITS_PER_LONG == 32 ? 5 : 6;
401 struct page *page = is_vmalloc_addr(word) ?
402 vmalloc_to_page(word) : virt_to_page(word);
403 const struct zone *zone = page_zone(page);
404 unsigned long val = (unsigned long)word << shift | bit;
405
406 return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
407}
408EXPORT_SYMBOL(bit_waitqueue);
409
410
411
412
413
414
415static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
416{
417 if (BITS_PER_LONG == 64) {
418 unsigned long q = (unsigned long)p;
419 return bit_waitqueue((void *)(q & ~1), q & 1);
420 }
421 return bit_waitqueue(p, 0);
422}
423
424static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
425 void *arg)
426{
427 struct wait_bit_key *key = arg;
428 struct wait_bit_queue *wait_bit
429 = container_of(wait, struct wait_bit_queue, wait);
430 atomic_t *val = key->flags;
431
432 if (wait_bit->key.flags != key->flags ||
433 wait_bit->key.bit_nr != key->bit_nr ||
434 atomic_read(val) != 0)
435 return 0;
436 return autoremove_wake_function(wait, mode, sync, key);
437}
438
439
440
441
442
443
444static __sched
445int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
446 int (*action)(atomic_t *), unsigned mode)
447{
448 atomic_t *val;
449 int ret = 0;
450
451 do {
452 prepare_to_wait(wq, &q->wait, mode);
453 val = q->key.flags;
454 if (atomic_read(val) == 0)
455 break;
456 ret = (*action)(val);
457 } while (!ret && atomic_read(val) != 0);
458 finish_wait(wq, &q->wait);
459 return ret;
460}
461
/*
 * Declare an on-stack wait entry for waiting on atomic_t @p, keyed via
 * __WAIT_ATOMIC_T_KEY_INITIALIZER and woken through
 * wake_atomic_t_function().
 */
#define DEFINE_WAIT_ATOMIC_T(name, p) \
	struct wait_bit_queue name = { \
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \
		.wait = { \
			.private = current, \
			.func = wake_atomic_t_function, \
			.task_list = \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}
472
473__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
474 unsigned mode)
475{
476 wait_queue_head_t *wq = atomic_t_waitqueue(p);
477 DEFINE_WAIT_ATOMIC_T(wait, p);
478
479 return __wait_on_atomic_t(wq, &wait, action, mode);
480}
481EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
482
483
484
485
486
487
488
489
490
491
492void wake_up_atomic_t(atomic_t *p)
493{
494 __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
495}
496EXPORT_SYMBOL(wake_up_atomic_t);
497
498__sched int bit_wait(struct wait_bit_key *word, int mode)
499{
500 schedule();
501 if (signal_pending_state(mode, current))
502 return -EINTR;
503 return 0;
504}
505EXPORT_SYMBOL(bit_wait);
506
507__sched int bit_wait_io(struct wait_bit_key *word, int mode)
508{
509 io_schedule();
510 if (signal_pending_state(mode, current))
511 return -EINTR;
512 return 0;
513}
514EXPORT_SYMBOL(bit_wait_io);
515
516__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
517{
518 unsigned long now = ACCESS_ONCE(jiffies);
519 if (time_after_eq(now, word->timeout))
520 return -EAGAIN;
521 schedule_timeout(word->timeout - now);
522 if (signal_pending_state(mode, current))
523 return -EINTR;
524 return 0;
525}
526EXPORT_SYMBOL_GPL(bit_wait_timeout);
527
528__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
529{
530 unsigned long now = ACCESS_ONCE(jiffies);
531 if (time_after_eq(now, word->timeout))
532 return -EAGAIN;
533 io_schedule_timeout(word->timeout - now);
534 if (signal_pending_state(mode, current))
535 return -EINTR;
536 return 0;
537}
538EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
539
/*
 * Initialise @wbq_entry for waiting on variable @var: the variable's
 * address becomes the key's flags pointer and bit_nr is set to -1,
 * matching the -1 "bit" that wake_up_var() signals; var_wake_function()
 * compares both fields before waking.  @flags is currently unused here.
 */
void init_wait_var_entry(struct wait_bit_queue *wbq_entry, void *var, int flags)
{
	*wbq_entry = (struct wait_bit_queue){
		.key = {
			.flags = (var),
			.bit_nr = -1,	/* sentinel: "variable", not a real bit */
		},
		.wait = {
			.private = current,
			.func = var_wake_function,
			.task_list = LIST_HEAD_INIT(wbq_entry->wait.task_list),
		},
	};
}
EXPORT_SYMBOL(init_wait_var_entry);
555
556void wake_up_var(void *var)
557{
558 __wake_up_bit(__var_waitqueue(var), var, -1);
559}
560EXPORT_SYMBOL(wake_up_var);
561
562wait_queue_head_t *__var_waitqueue(void *p)
563{
564 return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
565}
566EXPORT_SYMBOL(__var_waitqueue);
567
568void __init wait_bit_init(void)
569{
570 int i;
571
572 for (i = 0; i < WAIT_TABLE_SIZE; i++)
573 init_waitqueue_head(bit_wait_table + i);
574}
575