#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10

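/*
 * A single wait-queue entry. ->private normally points to the waiting task
 * (see init_waitqueue_entry()), but callers that install a custom ->func may
 * store arbitrary state there. Entries are linked into a wait_queue_head,
 * whose list is protected by ->lock.
 */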
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

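/*
 * Macros for declaration and initialisation of the datatypes.
 */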
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.head		= { &(name).head, &(name).head } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)					\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_waitqueue_head((wq_head), #wq_head, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}

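/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care.  Checking the wait list
 * without wq_head->lock can race with a task that is just adding itself to
 * the queue, so the waker must order "make the condition true" before
 * "check waitqueue_active()", e.g. with an explicit smp_mb(), otherwise a
 * wakeup can be missed.  When in doubt, use wq_has_sleeper() (which includes
 * the barrier) or simply call wake_up() unconditionally.
 */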
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

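/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if the wait list holds exactly one entry.  The same lockless
 * caveats as for waitqueue_active() apply.
 */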
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

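/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has waiting processes.
 *
 * The smp_mb() below orders the waker's write to the wakeup condition
 * against its read of the wait list; it pairs with the barrier implied by
 * set_current_state() on the waiter side (see prepare_to_wait()), so that
 * either the waker sees the waiter on the list or the waiter sees the
 * updated condition.
 */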
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	smp_mb();
	return waitqueue_active(wq_head);
}

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}

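/*
 * Used for wake-one threads:
 *
 * Entries marked WQ_FLAG_EXCLUSIVE cause __wake_up() to stop scanning the
 * list once enough exclusive waiters have been woken, giving wake-one (or
 * wake-N) semantics instead of waking every waiter.
 */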
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

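/*
 * Wakeup macros to be used to report events to the targets.
 *
 * The *_poll() variants pass an EPOLL* mask in @m as the wake key so that
 * pollers sleeping on the queue can filter on the events they care about.
 */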
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)			\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

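/*
 * Note: ___wait_event() below declares its own 'long __ret', deliberately
 * shadowing the __ret variable set up by the wait_event_*() wrappers.  The
 * timeout variants seed it through the 'ret' argument and update it from
 * 'cmd' (e.g. __ret = schedule_timeout(__ret)), and its final value becomes
 * the value of the whole statement expression.
 */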
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_entry __wq_entry;				\
	long __ret = ret;						\
									\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
__out:	__ret;								\
})

#define __wait_event(wq_head, condition)				\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())

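/**
 * wait_event - sleep until a condition becomes true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until @condition
 * evaluates to true.  @condition is checked each time the waitqueue
 * @wq_head is woken up, so the waker must call wake_up() after changing
 * any variable that could affect the result of @condition.
 *
 * Minimal usage sketch (illustrative only; 'my_wq' and 'my_done' are
 * hypothetical names, not part of this API):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static bool my_done;
 *
 *	waiter:	wait_event(my_wq, my_done);
 *	waker:	my_done = true;
 *		wake_up(&my_wq);
 */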
#define wait_event(wq_head, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq_head, condition);				\
} while (0)

#define __io_wait_event(wq_head, condition)				\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

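/*
 * io_wait_event() -- like wait_event() but with io_schedule(), so the time
 * spent sleeping is accounted as I/O wait.
 */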
#define io_wait_event(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq_head, condition);				\
} while (0)

#define __wait_event_freezable(wq_head, condition)			\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      freezable_schedule())

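/**
 * wait_event_freezable - sleep (or freeze) until a condition becomes true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Behaves like wait_event_interruptible(), but the task sleeps via
 * freezable_schedule() so it does not block the system freezer during
 * suspend/hibernation.  Returns 0 when @condition is true, or -ERESTARTSYS
 * if a signal was received.
 */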
#define wait_event_freezable(wq_head, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq_head, condition);	\
	__ret;								\
})

#define __wait_event_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

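/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until @condition
 * evaluates to true or @timeout jiffies elapse.  wake_up() has to be called
 * after changing any variable that could change the result of @condition.
 *
 * Returns:
 * 0 if @condition was still false after the timeout elapsed,
 * 1 if @condition became true after the timeout elapsed, or
 * the remaining jiffies (at least 1) if @condition became true before the
 * timeout elapsed.
 */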
#define wait_event_timeout(wq_head, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq_head, condition, timeout); \
	__ret;								\
})

#define __wait_event_freezable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = freezable_schedule_timeout(__ret))

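/*
 * Like wait_event_timeout() but the task sleeps in TASK_INTERRUPTIBLE via
 * freezable_schedule_timeout() so that it does not block the freezer.
 */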
#define wait_event_freezable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)	\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)

#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)	\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);	\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

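/**
 * wait_event_cmd - sleep until a condition becomes true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to execute before each schedule()
 * @cmd2: the command to execute after each schedule()
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until @condition
 * evaluates to true.  wake_up() has to be called after changing any variable
 * that could change the result of @condition.  wait_event_exclusive_cmd()
 * above is identical except that the waiter is queued as an exclusive
 * (wake-one) entry.
 */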
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_interruptible(wq_head, condition)			\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      schedule())

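/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until @condition
 * evaluates to true or a signal is received.  wake_up() has to be called
 * after changing any variable that could change the result of @condition.
 *
 * Returns 0 if @condition became true, or -ERESTARTSYS if the sleep was
 * interrupted by a signal.
 */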
#define wait_event_interruptible(wq_head, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq_head, condition); \
	__ret;								\
})

#define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

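/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * Like wait_event_timeout() but the sleep is TASK_INTERRUPTIBLE.
 *
 * Returns:
 * 0 if @condition was still false after the timeout elapsed,
 * 1 if @condition became true after the timeout elapsed,
 * the remaining jiffies (at least 1) if @condition became true before the
 * timeout elapsed, or -ERESTARTSYS if the sleep was interrupted by a signal.
 */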
#define wait_event_interruptible_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq_head,	\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)	\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,		\
				      HRTIMER_MODE_REL);		\
	if ((timeout) != KTIME_MAX)					\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

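/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * Like wait_event_timeout() but the timeout is given as a ktime_t and an
 * hrtimer is used, so the granularity is not limited to jiffies.
 *
 * Returns 0 if @condition became true, or -ETIME if the timeout elapsed.
 * The interruptible variant below can also return -ERESTARTSYS if a signal
 * was received.
 */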
#define wait_event_hrtimeout(wq_head, condition, timeout)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition); \
	__ret;								\
})

#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})

#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition); \
	__ret;								\
})

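/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until @condition evaluates to
 * true: like TASK_UNINTERRUPTIBLE the sleep cannot be broken by signals,
 * but it does not count towards the load average.  wake_up() has to be
 * called after changing any variable that could change the result of
 * @condition.
 */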
#define wait_event_idle(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)

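/**
 * wait_event_idle_exclusive - wait without load contribution, as an exclusive waiter
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Same as wait_event_idle(), but the waiter is queued with WQ_FLAG_EXCLUSIVE
 * so that a wake-one wakeup will wake at most one such waiter.
 */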
#define wait_event_idle_exclusive(wq_head, condition)			\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_IDLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

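/**
 * wait_event_idle_timeout - TASK_IDLE wait with a timeout
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * Return values are the same as for wait_event_timeout(): 0 or 1 if the
 * timeout elapsed (depending on @condition), or the remaining jiffies if
 * @condition became true before the timeout.  The _exclusive variant below
 * additionally queues the waiter with WQ_FLAG_EXCLUSIVE.
 */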
#define wait_event_idle_timeout(wq_head, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
	__ret;								\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_IDLE, 1, timeout,				\
		      __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout); \
	__ret;								\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({									\
	int __ret;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		__ret = fn(&(wq), &__wait);				\
		if (__ret)						\
			break;						\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

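/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 *	(wq->lock is already held)
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The four *_locked() variants below must be called with @wq.lock held (the
 * _irq versions with spin_lock_irq()).  The lock is dropped while sleeping
 * and re-acquired before @condition is evaluated and before the macro
 * returns, so the condition may safely rely on that lock.  The waker side
 * should use wake_up_locked() or wake_up_locked_poll() while holding the
 * same lock.  The _exclusive_ variants queue the waiter as an exclusive
 * (wake-one) entry.
 *
 * Returns 0 if @condition became true, or -ERESTARTSYS if the sleep was
 * interrupted by a signal.
 */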
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))

#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

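/**
 * wait_event_killable - sleep until a condition becomes true, unless killed
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until @condition evaluates to
 * true or a fatal signal is received.
 *
 * Returns 0 if @condition became true, or -ERESTARTSYS if the sleep was
 * interrupted by a fatal signal.
 */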
#define wait_event_killable(wq_head, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq_head, condition);	\
	__ret;								\
})

#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

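/**
 * wait_event_killable_timeout - sleep until a condition becomes true, a fatal
 *	signal arrives, or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * Returns:
 * 0 if @condition was still false after the timeout elapsed,
 * 1 if @condition became true after the timeout elapsed,
 * the remaining jiffies (at least 1) if @condition became true before the
 * timeout elapsed, or -ERESTARTSYS if interrupted by a fatal signal.
 *
 * Only kill signals interrupt this wait.
 */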
#define wait_event_killable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_killable_timeout(wq_head,		\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

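/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true; the
 *	condition is checked with @lock held
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which is released (with spin_unlock_irq())
 *	before @cmd and schedule() and reacquired afterwards
 * @cmd: a command which is invoked each time before going to sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until @condition
 * evaluates to true.  This is expected to be called with @lock held;
 * @condition is evaluated and the macro returns with @lock held again.
 * wake_up() has to be called after changing any variable that could change
 * the result of @condition.
 */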
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)

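/*
 * wait_event_lock_irq() is the same as wait_event_lock_irq_cmd() with an
 * empty @cmd: sleep in TASK_UNINTERRUPTIBLE until @condition becomes true,
 * with @lock held around the condition check and dropped across schedule().
 */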
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)

#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

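/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes
 *	true or a signal is received; the condition is checked with @lock held
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which is released before @cmd and schedule()
 *	and reacquired afterwards
 * @cmd: a command which is invoked each time before going to sleep
 *
 * Same as wait_event_lock_irq_cmd() but the sleep is TASK_INTERRUPTIBLE.
 * Returns 0 if @condition became true, or -ERESTARTSYS if interrupted by a
 * signal; in both cases @lock is held on return.
 */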
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})

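/*
 * wait_event_interruptible_lock_irq() is the same as
 * wait_event_interruptible_lock_irq_cmd() with an empty @cmd.
 */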
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),	\
		      state, 0, timeout,				\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock));

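/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes
 *	true, a signal is received, or a timeout elapses; the condition is
 *	checked with @lock held
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which is released across schedule_timeout()
 *	and reacquired afterwards
 * @timeout: timeout, in jiffies
 *
 * Return values follow wait_event_interruptible_timeout(): 0 on timeout,
 * the remaining jiffies if @condition became true, or -ERESTARTSYS on a
 * signal.  wait_event_lock_irq_timeout() below is the uninterruptible
 * (TASK_UNINTERRUPTIBLE) version and cannot return -ERESTARTSYS.
 */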
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_INTERRUPTIBLE);		\
	__ret;								\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_UNINTERRUPTIBLE);		\
	__ret;								\
})

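/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time.
 *
 * Open-coded wait loop sketch using the helpers below (illustrative only;
 * 'my_wq' and 'my_cond' are hypothetical names):
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 *
 * autoremove_wake_function() (used by DEFINE_WAIT()) removes the entry from
 * the queue when it wakes the task, and finish_wait() handles the case where
 * no wakeup happened.
 */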
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)

bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg);

#endif