/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>

#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct wait_queue_entry wait_queue_entry_t;

typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
#define WQ_FLAG_BOOKMARK	0x04
#define WQ_FLAG_CUSTOM		0x08
#define WQ_FLAG_DONE		0x10
#define WQ_FLAG_PRIORITY	0x20

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	entry;
};

struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	head;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= LIST_HEAD_INIT(name.head) }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
{
	wq_entry->flags		= 0;
	wq_entry->private	= p;
	wq_entry->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
{
	wq_entry->flags		= 0;
	wq_entry->private	= NULL;
	wq_entry->func		= func;
}
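
/*
 * Illustrative sketch (not part of this header): attaching a custom wake
 * callback with init_waitqueue_func_entry(), in the style of the poll and
 * eventfd code. The names my_wake_function, my_dev and my_dev_notify are
 * hypothetical.
 *
 *	static int my_wake_function(struct wait_queue_entry *wq_entry,
 *				    unsigned mode, int flags, void *key)
 *	{
 *		struct my_dev *dev = wq_entry->private;
 *
 *		// run custom wakeup logic instead of waking a task
 *		return my_dev_notify(dev, key_to_poll(key));
 *	}
 *
 *	init_waitqueue_func_entry(&entry, my_wake_function);
 *	entry.private = dev;	// init_waitqueue_func_entry() left this NULL
 *	add_wait_queue(&head, &entry);
 */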

/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))    if (@cond)
 *              wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}

/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has only one sleeper on the list.
 *
 * Please refer to the comment for wq_has_sleeper.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if @wq_head has waiting processes.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}

extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	struct list_head *head = &wq_head->head;
	struct wait_queue_entry *wq;

	/* Keep WQ_FLAG_PRIORITY entries at the front of the list. */
	list_for_each_entry(wq, &wq_head->head, entry) {
		if (!(wq->flags & WQ_FLAG_PRIORITY))
			break;
		head = &wq->entry;
	}
	list_add(&wq_entry->entry, head);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}

static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
void __wake_up_pollfree(struct wait_queue_head *wq_head);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)

/*
 * Wakeup macros to be used to report events to the targets:
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
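
/*
 * Illustrative sketch (not part of this header): a character device that
 * reports readability to pollers after queueing data. The names rx_wait
 * and rx_fifo are hypothetical.
 *
 *	// producer side, after making data available:
 *	kfifo_in(&rx_fifo, buf, len);
 *	wake_up_interruptible_poll(&rx_wait, EPOLLIN | EPOLLRDNORM);
 *
 *	// the driver's .poll() method registers on the same queue:
 *	poll_wait(file, &rx_wait, wait);
 */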

/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a pollable waitqueue's lifetime is shorter
 * than that of its userspace poll(2) listeners, wake up everything still
 * on the queue with a POLLFREE notification so that epoll drops its
 * references to the queue before the queue is freed.
 *
 * The caller must make sure that no new waiters can be added afterwards,
 * and must wait for an RCU grace period before freeing the waitqueue, as
 * epoll removes its entries under RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
	/*
	 * For performance reasons the queue lock is not taken here, so this
	 * lockless check can race with a concurrent removal of the last
	 * entry. That race is benign: an entry in the middle of being
	 * removed no longer needs the POLLFREE notification, and
	 * __wake_up_pollfree() takes the lock itself.
	 */
	if (waitqueue_active(wq_head))
		__wake_up_pollfree(wq_head);
}
/*
 * Returns true if we should stop waiting: either @condition is true (and
 * __ret records at least 1 remaining jiffy) or the timeout has expired
 * (__ret == 0).
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})

#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
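
/*
 * Illustrative sketch (not part of this header): a minimal waiter/waker
 * pair built on wait_event()/wake_up(). The names done_wq and done are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(done_wq);
 *	static bool done;
 *
 *	// waiter:
 *	wait_event(done_wq, done);
 *
 *	// waker -- update the condition *before* waking:
 *	done = true;
 *	wake_up(&done_wq);
 */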

#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)

#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The @condition is
 * checked each time the waitqueue @wq_head is woken up. The task freezes
 * instead of blocking the freezer during a system suspend.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})

#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);	\
	__ret;									\
})
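
/*
 * Illustrative sketch (not part of this header): handling the signal case
 * in a syscall path. The names data_wq and data_ready are hypothetical.
 *
 *	int err = wait_event_interruptible(data_wq, data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: let the signal code restart us
 *	// data_ready was true here
 */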

#define __wait_event_interruptible_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
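
/*
 * Illustrative sketch (not part of this header): interpreting the tristate
 * return value. The names resp_wq and resp_done are hypothetical.
 *
 *	long t = wait_event_interruptible_timeout(resp_wq, resp_done, HZ);
 *	if (t < 0)
 *		return t;		// -ERESTARTSYS: interrupted by a signal
 *	if (t == 0)
 *		return -ETIMEDOUT;	// a full second elapsed, still not done
 *	// t > 0: condition became true with t jiffies to spare
 */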

#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		/* __t.task is cleared by the wakeup when the timer fires */	\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the
 * timeout elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true, a signal is received, or the timeout
 * elapses. The @condition is checked each time the waitqueue @wq is
 * woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;									\
})

#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})

/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule());	\
} while (0)

/**
 * wait_event_idle_exclusive - wait for a condition, without load contribution, exclusively
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, only one of them is
 * considered per wakeup.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule());	\
} while (0)

#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, only one of them is
 * considered per wakeup.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})

extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)	\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, only one of them is
 * considered per wakeup.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, only one of them is
 * considered per wakeup.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))

#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})

#define __wait_event_killable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_KILLABLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_killable_timeout(wq_head,			\
						condition, timeout);		\
	__ret;									\
})

#define __wait_event_lock_irq(wq_head, condition, lock, cmd)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);				\
			    cmd;						\
			    schedule();						\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)			\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);			\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_lock_irq(wq_head, condition, lock, );			\
} while (0)

#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)	\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);					\
		      cmd;							\
		      schedule();						\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)	\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock, cmd);		\
	__ret;									\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)		\
({										\
	int __ret = 0;								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_lock_irq(wq_head,		\
						condition, lock,);		\
	__ret;									\
})

#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      state, 0, timeout,					\
		      spin_unlock_irq(&lock);					\
		      __ret = schedule_timeout(__ret);				\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,	\
						  timeout)			\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_INTERRUPTIBLE);			\
	__ret;									\
})

#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)		\
({										\
	long __ret = timeout;							\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_lock_irq_timeout(				\
					wq_head, condition, lock, timeout,	\
					TASK_UNINTERRUPTIBLE);			\
	__ret;									\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
bool prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)					\
	struct wait_queue_entry name = {					\
		.private	= current,					\
		.func		= function,					\
		.entry		= LIST_HEAD_INIT((name).entry),			\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define init_wait(wait)								\
	do {									\
		(wait)->private = current;					\
		(wait)->func = autoremove_wake_function;			\
		INIT_LIST_HEAD(&(wait)->entry);					\
		(wait)->flags = 0;						\
	} while (0)
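
/*
 * Illustrative sketch (not part of this header): the canonical open-coded
 * wait loop using DEFINE_WAIT()/prepare_to_wait()/finish_wait(), for cases
 * where wait_event() cannot express the condition. The names my_wq and
 * my_cond are hypothetical.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		if (signal_pending(current))
 *			break;	// caller decides how to report -ERESTARTSYS
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */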

typedef int (*task_call_f)(struct task_struct *p, void *arg);
extern int task_call_func(struct task_struct *p, task_call_f func, void *arg);

#endif /* _LINUX_WAIT_H */