1#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
3
4
5
6#include <linux/list.h>
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9
10#include <asm/current.h>
11#include <uapi/linux/wait.h>
12
/*
 * A wait queue entry.  ->private usually points at the sleeping task and
 * ->func is the callback invoked by the wake-up path (default_wake_function
 * unless overridden with init_waitqueue_func_entry()/DEFINE_WAIT_FUNC()).
 */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* wait_queue_t ->flags bits */
#define WQ_FLAG_EXCLUSIVE	0x01	/* entry is an exclusive waiter */
#define WQ_FLAG_WOKEN		0x02	/* used by wait_woken()/woken_wake_function() -- TODO confirm against sched/wait.c */

struct __wait_queue {
	unsigned int flags;		/* WQ_FLAG_* */
	void *private;			/* typically the waiting task_struct */
	wait_queue_func_t func;		/* wake-up callback */
	struct list_head task_list;	/* link in __wait_queue_head::task_list */
};

/* Key identifying what a bit-waiter is waiting on. */
struct wait_bit_key {
	void *flags;			/* address of the word containing the bit */
	int bit_nr;			/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR -1		/* ->flags is an atomic_t, not a bit word */
	unsigned long timeout;		/* presumably used by bit_wait_timeout() -- verify in sched/wait.c */
};

/* A wait queue entry together with the bit key it waits for. */
struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

/* Head of a wait queue: a lock protecting the list of waiting entries. */
struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;	/* forward declaration; the full definition is not needed here */
47
48
49
50
51
/*
 * Macros for declaration and initialisation of the datatypes.
 *
 * __WAITQUEUE_INITIALIZER deliberately leaves .task_list as {NULL, NULL}:
 * the entry is not on any queue until it is explicitly added.
 */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait queue head: unlocked lock, empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializer for a bit-wait key: the (word, bit) pair being waited on ... */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

/* ... or an atomic_t, flagged by the special bit number. */
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Run-time initialization of a wait queue head.  The function-local static
 * __key gives every call site its own lockdep class; #q passes the variable
 * name for debugging.
 */
#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

/*
 * With lockdep enabled, on-stack wait queue heads need run-time
 * initialization so they get a valid lockdep key; without lockdep the
 * plain static initializer suffices.
 */
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
90
91static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
92{
93 q->flags = 0;
94 q->private = p;
95 q->func = default_wake_function;
96}
97
98static inline void
99init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
100{
101 q->flags = 0;
102 q->private = NULL;
103 q->func = func;
104}
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this check is lockless; without q->lock held (or explicit memory
 * ordering on both sides) it can race with a waiter that is just being
 * added, so a wake-up based solely on it may be missed.  See
 * wq_has_sleeper() for the barrier-paired variant.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
140
141
142
143
144
145
146
147
148
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if @wq has waiting processes.
 *
 * Unlike a bare waitqueue_active(), this orders the caller's earlier
 * stores (e.g. making the wake condition true) before the list check.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}
161
/* Out-of-line add/remove helpers (defined elsewhere; presumably take q->lock). */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

/*
 * Add @new at the head of the list.  Does no locking itself -- callers
 * are expected to hold the waitqueue lock.
 */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
170
171
172
173
174static inline void
175__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
176{
177 wait->flags |= WQ_FLAG_EXCLUSIVE;
178 __add_wait_queue(q, wait);
179}
180
/* Add @new at the tail of the list; no locking is done here. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
186
187static inline void
188__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
189{
190 wait->flags |= WQ_FLAG_EXCLUSIVE;
191 __add_wait_queue_tail(q, wait);
192}
193
/*
 * Unlink @old from whatever list it is on.  @head is unused but kept for
 * symmetry with the add helpers.  No locking is done here.
 */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
199
/* Action callback run by the bit-wait loop; returns non-zero to abort the wait. */
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);

/* Wake-up / bit-wait primitives implemented out of line. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

/*
 * Wakeup macros to be used to report events to the targets.
 * "_nr" wakes up to @nr exclusive waiters, "_all" passes nr == 0 (all),
 * "_locked" assumes the waitqueue lock is already held by the caller.
 */
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * The "_poll" variants pass the poll event mask @m through the wake-up
 * key argument, so wake callbacks can filter on the event of interest.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

/*
 * Helper for the *_timeout waiters: evaluates to true when the wait
 * should stop -- either @condition holds or the remaining-jiffies
 * counter __ret (decremented by schedule_timeout()) reached 0.  If the
 * condition became true exactly as the timeout expired, __ret is forced
 * to 1 so the caller can still tell success from timeout.
 */
#define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})

/*
 * True when a sleep in @state can be cut short by signals.  A state that
 * is not a compile-time constant is conservatively treated as
 * interruptible.
 */
#define ___wait_is_interruptible(state) \
	(!__builtin_constant_p(state) || \
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \

/* Initialize a wait entry for current with @flags (e.g. WQ_FLAG_EXCLUSIVE). */
extern void init_wait_entry(wait_queue_t *__wait, int flags);
253
254
255
256
257
258
259
260
261
262
263
264
265
/*
 * The core of every wait_event*() macro.  Each loop iteration:
 *  - prepare_to_wait_event() (re)queues the entry and sets the task
 *    state, returning non-zero when @state is signalable and a signal
 *    is pending;
 *  - @condition is re-checked *after* the state change, so a wake-up
 *    racing with the check cannot be lost;
 *  - @cmd (normally schedule()) actually sleeps.
 *
 * On a pending signal we exit through the local __out label, skipping
 * finish_wait(); prepare_to_wait_event() is expected to have dequeued
 * the entry in that case -- see kernel/sched/wait.c.
 *
 * The whole statement expression evaluates to __ret: the initial @ret,
 * possibly updated by @cmd (timeout variants) or by the signal path.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_t __wait; \
	long __ret = ret; \
 \
	init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
	for (;;) { \
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
 \
		if (condition) \
			break; \
 \
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
 \
		cmd; \
	} \
	finish_wait(&wq, &__wait); \
__out: __ret; \
})

/* Uninterruptible wait; result discarded (cannot fail). */
#define __wait_event(wq, condition) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    schedule())
293
294
295
296
297
298
299
300
301
302
303
304
305
/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)

/* Like __wait_event() but sleeps via io_schedule() (I/O-accounted sleep). */
#define __io_wait_event(wq, condition) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule().
 */
#define io_wait_event(wq, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__io_wait_event(wq, condition); \
} while (0)

/* Interruptible wait that also lets the freezer run after each wake-up. */
#define __wait_event_freezable(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Like wait_event_interruptible(), but calls try_to_freeze() after each
 * schedule() so the freezer may freeze the task while it waits.
 *
 * Returns 0 when @condition became true, or the non-zero value returned
 * by prepare_to_wait_event() when interrupted by a signal (normally
 * -ERESTARTSYS -- see kernel/sched/wait.c).
 */
#define wait_event_freezable(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable(wq, condition); \
	__ret; \
})
353
/* Timed uninterruptible wait; __ret carries the remaining jiffies. */
#define __wait_event_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_UNINTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_timeout(wq, condition, timeout); \
	__ret; \
})

/* Timed, freezable, interruptible wait. */
#define __wait_event_freezable_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * Like wait_event_timeout() but interruptible, with try_to_freeze()
 * after each schedule_timeout().  Same return convention as
 * wait_event_timeout(), plus the signal-path value on interruption.
 */
#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret; \
})

/* Exclusive uninterruptible wait running @cmd1 before and @cmd2 after schedule(). */
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
			    cmd1; schedule(); cmd2)

/* Exclusive-waiter variant of wait_event_cmd(); cannot fail. */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to execute before schedule()
 * @cmd2: the command to execute after schedule()
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
	if (condition) \
		break; \
	__wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)
442
/* Signal-interruptible wait. */
#define __wait_event_interruptible(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 if @condition became true, or the non-zero signal-path
 * value from prepare_to_wait_event() (normally -ERESTARTSYS).
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible(wq, condition); \
	__ret; \
})

/* Timed, signal-interruptible wait; __ret carries remaining jiffies. */
#define __wait_event_interruptible_timeout(wq, condition, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true
 * or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated to
 * %true before the @timeout elapsed, or the signal-path value
 * (normally -%ERESTARTSYS) if interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_timeout(wq, \
						condition, timeout); \
	__ret; \
})
505
/*
 * High-resolution-timer variant of the wait loop.  An on-stack hrtimer
 * sleeper is armed for @timeout (KTIME_MAX means "wait forever", so no
 * timer is started), then the usual ___wait_event() loop runs.  When
 * the timer fires, __t.task is cleared (hrtimer sleeper semantics --
 * see hrtimer_init_sleeper()), which the loop translates into -ETIME.
 * The timer is cancelled and the on-stack object destroyed on all
 * exit paths.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
	int __ret = 0; \
	struct hrtimer_sleeper __t; \
 \
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
			      HRTIMER_MODE_REL); \
	hrtimer_init_sleeper(&__t, current); \
	if ((timeout) != KTIME_MAX) \
		hrtimer_start_range_ns(&__t.timer, timeout, \
				       current->timer_slack_ns, \
				       HRTIMER_MODE_REL); \
 \
	__ret = ___wait_event(wq, condition, state, 0, 0, \
		if (!__t.task) { \
			__ret = -ETIME; \
			break; \
		} \
		schedule()); \
 \
	hrtimer_cancel(&__t.timer); \
	destroy_hrtimer_on_stack(&__t.timer); \
	__ret; \
})
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 *
 * Returns 0 if @condition became true, or -%ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_UNINTERRUPTIBLE); \
	__ret; \
})

/**
 * wait_event_interruptible_hrtimeout - like wait_event_hrtimeout() but
 * interruptible (TASK_INTERRUPTIBLE); additionally returns the
 * signal-path value (normally -%ERESTARTSYS) when interrupted.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
	long __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_hrtimeout(wq, condition, timeout, \
					       TASK_INTERRUPTIBLE); \
	__ret; \
})

/* Interruptible wait queued as an exclusive (wake-one) entry. */
#define __wait_event_interruptible_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret; \
})

/* Exclusive wait interruptible only by fatal signals (TASK_KILLABLE). */
#define __wait_event_killable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
		      schedule())

#define wait_event_killable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable_exclusive(wq, condition); \
	__ret; \
})
608
609
/* Exclusive, interruptible, freezable wait. */
#define __wait_event_freezable_exclusive(wq, condition) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
		      schedule(); try_to_freeze())

/* Returns 0 on success or the signal-path value (normally -ERESTARTSYS). */
#define wait_event_freezable_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret; \
})

/*
 * One queue-and-sleep step for the *_locked waiters below; the _irq
 * variant presumably uses spin_lock_irq()/spin_unlock_irq() on wq.lock
 * -- see their definitions in kernel/sched/wait.c.
 */
extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);

/*
 * Core of the *_locked waiters: with wq.lock held by the caller,
 * repeatedly call @fn (do_wait_intr or do_wait_intr_irq) until either
 * @condition holds or @fn reports an error (e.g. a pending signal).
 * The entry is removed and the task set back to TASK_RUNNING before
 * the result is returned.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
({ \
	int __ret; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		__ret = fn(&(wq), &__wait); \
		if (__ret) \
			break; \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 *
 * It must be called with wq.lock being held; @condition is tested with
 * the lock held, and the lock is dropped while actually sleeping (see
 * do_wait_intr()).  The lock is held again when this macro returns.
 *
 * Returns 0 if @condition became true, or the non-zero value returned
 * by do_wait_intr() on a pending signal (normally -ERESTARTSYS).
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))

/*
 * Same as wait_event_interruptible_locked(), but for callers that hold
 * wq.lock with interrupts disabled (spin_lock_irq): the sleep step is
 * do_wait_intr_irq().
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))

/*
 * Exclusive-waiter (wake-one) variant of
 * wait_event_interruptible_locked(); the entry is flagged
 * WQ_FLAG_EXCLUSIVE.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))

/*
 * Exclusive-waiter variant of wait_event_interruptible_locked_irq().
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
758
759
/* Wait interruptible only by fatal signals (TASK_KILLABLE). */
#define __wait_event_killable(wq, condition) \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the @condition
 * evaluates to true or a fatal signal is received.  The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 if @condition became true, or the signal-path value
 * (normally -ERESTARTSYS) on a fatal signal.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_event_killable(wq, condition); \
	__ret; \
})
786
787
/*
 * Uninterruptible wait with @lock (a spinlock taken with spin_lock_irq)
 * dropped around @cmd + schedule() and reacquired afterwards.
 * @condition is therefore always evaluated with @lock held.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd) \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock); \
			    cmd; \
			    schedule(); \
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before @cmd and
 *	  schedule() and reacquired afterwards
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * This macro must be called with @lock held (taken with spin_lock_irq);
 * @lock is held again when the macro completes.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true
 *
 * Same as wait_event_lock_irq_cmd() with an empty @cmd -- note the
 * deliberately empty last macro argument below.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
851
852
/* Interruptible counterpart of __wait_event_lock_irq(). */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
		      spin_unlock_irq(&lock); \
		      cmd; \
		      schedule(); \
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets
 * true or a signal is received
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t (taken with spin_lock_irq), released
 *	  before @cmd and schedule() and reacquired afterwards
 * @cmd: a command invoked outside the critical section before sleep
 *
 * Must be called with @lock held; @lock is held again on return.
 * Returns 0 if @condition became true, or the signal-path value
 * (normally -ERESTARTSYS) if interrupted.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - same as the _cmd variant with an
 * empty command (note the deliberately empty last macro argument).
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__ret = __wait_event_interruptible_lock_irq(wq, \
						condition, lock,); \
	__ret; \
})
924
/*
 * Timed, interruptible wait with @lock (spin_lock_irq) dropped around
 * the schedule_timeout() call and reacquired afterwards; @condition is
 * always evaluated with @lock held.  __ret carries the remaining
 * jiffies, per ___wait_cond_timeout().
 *
 * Note: the definition must NOT end in a semicolon -- the expansion is
 * a ({ ... }) statement expression, and a stray ';' would break uses of
 * this macro inside larger expressions (the previous definition carried
 * one, silently injecting an empty statement at every expansion).
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
						    lock, timeout) \
	___wait_event(wq, ___wait_cond_timeout(condition), \
		      TASK_INTERRUPTIBLE, 0, timeout, \
		      spin_unlock_irq(&lock); \
		      __ret = schedule_timeout(__ret); \
		      spin_lock_irq(&lock))
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition
 * gets true, a signal is received, or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t (taken with spin_lock_irq), released while
 *	  sleeping and reacquired afterwards; held again on return
 * @timeout: timeout, in jiffies
 *
 * Returns, like wait_event_timeout(): 0 on timeout with @condition
 * false, 1 if @condition became true as the timeout expired, the
 * remaining jiffies (at least 1) if it became true earlier -- or the
 * signal-path value (normally -ERESTARTSYS) if interrupted.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
						  timeout) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout(condition)) \
		__ret = __wait_event_interruptible_lock_irq_timeout( \
					wq, condition, lock, timeout); \
	__ret; \
})
966
967
968
969
/* Waitqueue plumbing implemented out of line (kernel/sched/wait.c). */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

/*
 * Declare an on-stack wait entry for the current task with wake
 * callback @function.  DEFINE_WAIT() uses autoremove_wake_function,
 * which (as the name suggests -- see sched/wait.c) dequeues the entry
 * when it wakes the task.
 */
#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private = current, \
		.func = function, \
		.task_list = LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

/* On-stack bit-wait entry for current, keyed on (word, bit). */
#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait = { \
			.private = current, \
			.func = wake_bit_function, \
			.task_list = \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

/* Re-initialize an existing wait entry for current (autoremove semantics). */
#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
		(wait)->flags = 0; \
	} while (0)

/* Standard bit-wait actions, passed to the wait_on_bit*() helpers below. */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029static inline int
1030wait_on_bit(unsigned long *word, int bit, unsigned mode)
1031{
1032 might_sleep();
1033 if (!test_bit(bit, word))
1034 return 0;
1035 return out_of_line_wait_on_bit(word, bit,
1036 bit_wait,
1037 mode);
1038}
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054static inline int
1055wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1056{
1057 might_sleep();
1058 if (!test_bit(bit, word))
1059 return 0;
1060 return out_of_line_wait_on_bit(word, bit,
1061 bit_wait_io,
1062 mode);
1063}
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080static inline int
1081wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1082 unsigned long timeout)
1083{
1084 might_sleep();
1085 if (!test_bit(bit, word))
1086 return 0;
1087 return out_of_line_wait_on_bit_timeout(word, bit,
1088 bit_wait_timeout,
1089 mode, timeout);
1090}
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108static inline int
1109wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1110 unsigned mode)
1111{
1112 might_sleep();
1113 if (!test_bit(bit, word))
1114 return 0;
1115 return out_of_line_wait_on_bit(word, bit, action, mode);
1116}
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137static inline int
1138wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1139{
1140 might_sleep();
1141 if (!test_and_set_bit(bit, word))
1142 return 0;
1143 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1144}
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161static inline int
1162wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1163{
1164 might_sleep();
1165 if (!test_and_set_bit(bit, word))
1166 return 0;
1167 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1168}
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187static inline int
1188wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1189 unsigned mode)
1190{
1191 might_sleep();
1192 if (!test_and_set_bit(bit, word))
1193 return 0;
1194 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1195}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207static inline
1208int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1209{
1210 might_sleep();
1211 if (atomic_read(val) == 0)
1212 return 0;
1213 return out_of_line_wait_on_atomic_t(val, action, mode);
1214}
1215
1216#endif
1217