#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20

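/*
 * A single wait-queue entry structure:
 */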
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

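/*
 * Macros for declaration and initialisation of the datatypes
 */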
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((wq_head), #wq_head, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}
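/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */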
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

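/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for wq_has_sleeper.
 */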
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

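/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has waiting processes.
 *
 * Please refer to the comment for waitqueue_active.
 */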
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
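	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */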
	smp_mb();
	return waitqueue_active(wq_head);
}

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

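/*
 * Entries flagged WQ_FLAG_PRIORITY are kept at the front of the list,
 * so a new non-priority entry is inserted after the last priority one.
 */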
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}

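/*
 * Used for wake-one threads:
 */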
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

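/*
 * Wakeup macros to be used to report events to the targets.
 */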
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m) \
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))

#define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
	(!__builtin_constant_p(state) || \
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

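/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */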
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	struct wait_queue_entry __wq_entry; \
	long __ret = ret; \
	\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
	for (;;) { \
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state); \
		\
		if (condition) \
			break; \
		\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
		\
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
__out:	__ret; \
})

#define __wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

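/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * A minimal illustrative pairing (my_wq and my_flag are placeholder
 * names, not part of this API):
 *
 *	// waiter
 *	wait_event(my_wq, my_flag);
 *
 *	// waker
 *	my_flag = true;
 *	wake_up(&my_wq);
 */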
#define wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__wait_event(wq_head, condition); \
} while (0)

#define __io_wait_event(wq_head, condition) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

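/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */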
#define io_wait_event(wq_head, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__io_wait_event(wq_head, condition); \
} while (0)

#define __wait_event_freezable(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      freezable_schedule())

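/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */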
#define wait_event_freezable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable(wq_head, condition); \
	__ret; \
})

#define __wait_event_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

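/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */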
#define wait_event_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_freezable_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = freezable_schedule_timeout(__ret))

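/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */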
#define wait_event_freezable_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)

#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

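/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command executed before sleep
 * @cmd2: the command executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */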
#define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq_head, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_interruptible(wq_head, condition) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

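/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */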
#define wait_event_interruptible(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq_head, condition); \
	__ret; \
})

#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

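/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */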
#define wait_event_interruptible_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq_head, \
							   condition, timeout); \
	__ret; \
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
	\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
				      HRTIMER_MODE_REL); \
	if ((timeout) != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
	\
	__ret = ___wait_event(wq_head, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
	\
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})

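/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the
 * timeout elapsed.
 */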
#define wait_event_hrtimeout(wq_head, condition, timeout) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})

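/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */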
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret; \
})

#define __wait_event_killable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
		      schedule())

#define wait_event_killable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable_exclusive(wq, condition); \
	__ret; \
})

#define __wait_event_freezable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable_exclusive(wq, condition); \
	__ret; \
})

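/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */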
#define wait_event_idle(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)

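/**
 * wait_event_idle_exclusive - wait exclusively for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, a wake-one wakeup
 * considers no further entries once this one has been woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */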
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_IDLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

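/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */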
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_IDLE, 1, timeout, \
		      __ret = schedule_timeout(__ret))

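/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * Like wait_event_idle_timeout(), except the entry is queued with the
 * WQ_FLAG_EXCLUSIVE flag set, so a wake-one wakeup considers no
 * further entries once this one has been woken.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */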
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
	__ret; \
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
	int __ret; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		__ret = fn(&(wq), &__wait); \
		if (__ret) \
			break; \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})

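/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */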
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

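/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Behaves like wait_event_interruptible_locked(), except @wq.lock is
 * locked/unlocked with spin_lock_irq()/spin_unlock_irq(), which must
 * match the way the lock is taken outside of this macro.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */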
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

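/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Behaves like wait_event_interruptible_locked(), except the entry is
 * queued with the WQ_FLAG_EXCLUSIVE flag set, so if other processes
 * wait on the same list, a wake-one wakeup considers no further
 * entries once this one has been woken.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */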
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

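/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Behaves like wait_event_interruptible_exclusive_locked(), except
 * @wq.lock is locked/unlocked with spin_lock_irq()/spin_unlock_irq().
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */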
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))

#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

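/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */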
#define wait_event_killable(wq_head, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable(wq_head, condition); \
	__ret; \
})

#define __wait_event_killable_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      TASK_KILLABLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

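/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */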
#define wait_event_killable_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_killable_timeout(wq_head, \
						      condition, timeout); \
	__ret; \
})

#define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))

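/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */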
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq_head, condition, lock, cmd); \
} while (0)

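/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */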
#define wait_event_lock_irq(wq_head, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq_head, condition, lock, ); \
} while (0)

#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))

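/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */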
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq_head, \
							    condition, lock, cmd); \
	__ret; \
})

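/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */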
#define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq_head, \
							    condition, lock,); \
	__ret; \
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition), \
		      state, 0, timeout, \
		      spin_unlock_irq(&lock); \
		      __ret = schedule_timeout(__ret); \
		      spin_lock_irq(&lock));

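/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */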
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_lock_irq_timeout( \
			wq_head, condition, lock, timeout, \
			TASK_INTERRUPTIBLE); \
	__ret; \
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_lock_irq_timeout( \
			wq_head, condition, lock, timeout, \
			TASK_UNINTERRUPTIBLE); \
	__ret; \
})

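/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */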
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	struct wait_queue_entry name = { \
		.private	= current, \
		.func		= function, \
		.entry		= LIST_HEAD_INIT((name).entry), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->entry); \
		(wait)->flags = 0; \
	} while (0)

bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif /* _LINUX_WAIT_H */