1#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
3
4
5#include <linux/list.h>
6#include <linux/stddef.h>
7#include <linux/spinlock.h>
8#include <asm/current.h>
9#include <uapi/linux/wait.h>
10
/*
 * A wait-queue entry: normally lives on the sleeper's stack and is linked
 * onto a wait_queue_head_t while the task waits.
 */
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wake callback, invoked when the queue head is woken.  The
 * return value is interpreted by the wake-up core (nonzero conventionally
 * means a task was actually woken — see the sched core implementation).
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
14
struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* bits below */
#define WQ_FLAG_EXCLUSIVE 0x01		/* wake at most one such waiter per wakeup */
	void *private;			/* usually the waiting task_struct (see init_waitqueue_entry) */
	wait_queue_func_t func;		/* callback used to wake this entry */
	struct list_head task_list;	/* linkage on wait_queue_head_t::task_list */
};
22
/* Identifies which bit of which word a bit-waiter is waiting on. */
struct wait_bit_key {
	void *flags;			/* address of the word containing the bit */
	int bit_nr;			/* bit number, or WAIT_ATOMIC_T_BIT_NR below */
#define WAIT_ATOMIC_T_BIT_NR -1		/* sentinel: waiting on an atomic_t, not a bit */
};
28
/* A wait-queue entry bundled with the bit (or atomic_t) it waits for. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};
33
/* Head of a wait queue: the lock protects the list of waiting entries. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
39
40struct task_struct;
41

/*
 * Macros for declaration and initialisation of the datatypes.
 */

/* Static initialiser for a wait-queue entry waking task @tsk via the
 * default wake function.  task_list starts { NULL, NULL }: the entry is
 * unlinked until it is added to a queue head. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

/* Declare and initialise an on-stack wait-queue entry for task @tsk. */
#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
53
/* Static initialiser for a wait-queue head: unlocked lock, empty
 * (self-referencing) waiter list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

/* Declare and statically initialise a wait-queue head. */
#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
60
/* Initialise a wait_bit_key for bit @bit of the word at @word. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

/* Initialise a wait_bit_key for waiting on the atomic_t at @p. */
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
66
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initialiser for a wait-queue head.  The function-local static
 * __key gives each call site its own lockdep class for q->lock.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)
75
#ifdef CONFIG_LOCKDEP
/*
 * On-stack heads get a runtime init under lockdep so each declaration
 * site gets a distinct lock class instead of sharing the static one.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
84
85static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
86{
87 q->flags = 0;
88 q->private = p;
89 q->func = default_wake_function;
90}
91
92static inline void init_waitqueue_func_entry(wait_queue_t *q,
93 wait_queue_func_t func)
94{
95 q->flags = 0;
96 q->private = NULL;
97 q->func = func;
98}
99
100static inline int waitqueue_active(wait_queue_head_t *q)
101{
102 return !list_empty(&q->task_list);
103}
104
105extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
106extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
107extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
108
/* Link @new at the head of @head; caller must hold head->lock. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
113
114
115
116
/*
 * Add an exclusive waiter at the head of @q (lock held by caller).
 * Exclusive waiters are woken at most @nr_exclusive at a time.
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}
123
/* Link @new at the tail of @head; caller must hold head->lock. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
129
/* Add an exclusive waiter at the tail of @q (lock held by caller). */
static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}
136
/* Unlink @old from its queue; caller must hold head->lock. */
static inline void __remove_wait_queue(wait_queue_head_t *head,
					wait_queue_t *old)
{
	list_del(&old->task_list);
}
142
/* Wake-up primitives, implemented out of line. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
/* Bit/atomic_t waiting: slow paths used by the inline helpers below. */
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
158
/*
 * Wakeup macros.  The third argument of __wake_up() is the number of
 * exclusive waiters to wake; the _all variants pass 0, the others 1.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
/* _locked variants: caller already holds (x)->lock. */
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * The _poll variants pass the poll event mask through the wake key so
 * that poll-aware wake functions can filter on it.
 */
#define wake_up_poll(x, m)				\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181
/*
 * Core uninterruptible wait loop.  The condition is (re)checked only
 * after prepare_to_wait() has queued us and set the task state, so a
 * wakeup racing with the check cannot be lost.
 */
#define __wait_event(wq, condition) 					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
194

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) 					\
do {									\
	if (condition)	 						\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
213
/*
 * Timed uninterruptible wait.  @ret carries the remaining jiffies in and
 * out (it is both the initial timeout and the result variable).
 */
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	/* condition came true just as the timeout expired: report	\
	 * success (1 jiffy left) rather than a timeout. */		\
	if (!ret && (condition))					\
		ret = 1;						\
	finish_wait(&wq, &__wait);					\
} while (0)
230

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition)) 						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})
255
/*
 * Interruptible wait loop.  On a pending signal, @ret is set to
 * -ERESTARTSYS and the wait is abandoned; otherwise @ret is untouched.
 */
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
273

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
296
/*
 * Interruptible timed wait.  @ret carries the remaining jiffies in and
 * out; it becomes -ERESTARTSYS on a signal, 0 on timeout, and is bumped
 * to 1 if the condition came true exactly as the timeout expired.
 */
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	if (!ret && (condition))					\
		ret = 1;						\
	finish_wait(&wq, &__wait);					\
} while (0)
318

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
344
/*
 * High-resolution timed wait in the given task @state.  An on-stack
 * hrtimer sleeper is armed (unless @timeout is KTIME_MAX, i.e. wait
 * forever); the sleeper clears __t.task when the timer fires, which
 * the loop uses as its timeout indication (-ETIME).  A pending signal
 * yields -ERESTARTSYS only for TASK_INTERRUPTIBLE waits.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, state);			\
		if (condition)						\
			break;						\
		if (state == TASK_INTERRUPTIBLE &&			\
		    signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule();						\
	}								\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	finish_wait(&wq, &__wait);					\
	__ret;								\
})
380

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the
 * timeout elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
405

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it
 * was interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
430
/*
 * Exclusive interruptible wait.  Note the asymmetric exits: on success
 * finish_wait() runs inside the loop, while a signal exit goes through
 * abort_exclusive_wait() so that a wakeup racing with the signal is
 * handed on to another exclusive waiter instead of being lost (see
 * abort_exclusive_wait()).
 */
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait, 			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)
452
/*
 * Like wait_event_interruptible(), but queue as an exclusive waiter.
 * Returns 0 when @condition became true, -ERESTARTSYS on a signal.
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})
460
461
/*
 * Common body for the *_locked wait macros: the caller already holds
 * (wq).lock, which is dropped only around schedule() (with or without
 * irq disabling, per @irq) and re-taken before the condition is
 * re-evaluated.  @exclusive selects exclusive queueing at the tail.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		/* re-queue only if a wakeup removed us from the list */\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
490

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * wake_up_locked() has to be called after changing any variable that
 * could change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
518

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_locked(), but the lock is dropped and
 * re-taken with spin_{un,}lock_irq() around the sleep, for callers
 * holding wq.lock with interrupts disabled.
 *
 * wake_up_locked() has to be called after changing any variable that
 * could change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
545

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The process is put on the wait queue as an exclusive waiter, so only
 * one waiter at a time is normally woken up.
 *
 * wake_up_locked() has to be called after changing any variable that
 * could change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
576

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible_exclusive_locked(), but the lock is
 * dropped and re-taken with spin_{un,}lock_irq() around the sleep,
 * for callers holding wq.lock with interrupts disabled.
 *
 * wake_up_locked() has to be called after changing any variable that
 * could change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
607

/*
 * Killable wait loop: like __wait_event_interruptible() but sleeps in
 * TASK_KILLABLE, so only fatal signals interrupt the wait.
 */
#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
627

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})
650

/*
 * Uninterruptible wait with @lock held by the caller: the condition is
 * tested under @lock, which is dropped (with irqs enabled) and @cmd
 * executed before each schedule(), then re-taken.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
667

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
697

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
724

/*
 * Interruptible variant of __wait_event_lock_irq(): additionally bails
 * out with @ret = -ERESTARTSYS when a signal is pending.  The signal
 * check happens under @lock, before it is dropped for @cmd/schedule().
 */
#define __wait_event_interruptible_lock_irq(wq, condition,		\
					    lock, ret, cmd)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
746

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, cmd);	\
	__ret;								\
})
781

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq(wq, condition,	\
						    lock, __ret, );	\
	__ret;								\
})
813
/*
 * Interruptible, timed wait with @lock held by the caller.  @ret holds
 * the remaining jiffies in and out; it becomes -ERESTARTSYS on a signal
 * and 0 on timeout.  @lock is dropped only around schedule_timeout().
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			ret = -ERESTARTSYS;				\
			break;						\
		}							\
		spin_unlock_irq(&lock);					\
		ret = schedule_timeout(ret);				\
		spin_lock_irq(&lock);					\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)
835

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if the @timeout elapsed, otherwise the remaining jiffies.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	/* Must be long, not int: @timeout and schedule_timeout() are	\
	 * long; an int would truncate large timeouts (e.g.		\
	 * MAX_SCHEDULE_TIMEOUT == LONG_MAX) on 64-bit. */		\
	long __ret = timeout;						\
									\
	if (!(condition))						\
		__wait_event_interruptible_lock_irq_timeout(		\
					wq, condition, lock, __ret);	\
	__ret;								\
})
870

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy (the condition cannot be rechecked under a lock
 * between the test and the sleep) — do not use them in new code;
 * use the wait_event* macros above instead.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);
883

/*
 * Waitqueue entry lifecycle: prepare_to_wait*() queues the entry and
 * sets the task state; finish_wait() restores TASK_RUNNING and unlinks;
 * abort_exclusive_wait() backs out an exclusive wait without losing a
 * concurrent wakeup.
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
/* Wake function that also removes the entry from the queue. */
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
/* Wake function used by the bit-wait machinery. */
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
894
/* Declare an on-stack wait-queue entry for current with wake callback
 * @function. */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* The common case: auto-removing entry for current. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
903
/* Declare an on-stack bit-wait entry for current, keyed on bit @bit of
 * the word at @word. */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
914
/* Runtime (re)initialisation of a wait-queue entry for current, using
 * the auto-removing wake function. */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Fast path: if the bit is already clear there is nothing to wait for
 * and we return 0 immediately; only a set bit takes the out-of-line
 * slow path.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (test_bit(bit, word))
		return out_of_line_wait_on_bit(word, bit, action, mode);
	return 0;
}
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
/**
 * wait_on_bit_lock - wait for a bit to be cleared, then set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Fast path: if test_and_set_bit() acquires the bit at once (it was
 * clear), return 0; otherwise fall back to the out-of-line slow path,
 * which sleeps until the bit can be taken.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (test_and_set_bit(bit, word))
		return out_of_line_wait_on_bit_lock(word, bit, action, mode);
	return 0;
}
968
969
970
971
972
973
974
975
976
977
978
979static inline
980int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
981{
982 if (atomic_read(val) == 0)
983 return 0;
984 return out_of_line_wait_on_atomic_t(val, action, mode);
985}
986
987#endif
988