1#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
3
4
5
6#include <linux/list.h>
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <asm/current.h>
10#include <uapi/linux/wait.h>
11
/*
 * A single waiter queued on a wait queue head.  @func is the callback run
 * at wakeup time; default_wake_function() is the common case.
 */
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* wait_queue_t->flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* waiter counts against the wake-nr limit */
#define WQ_FLAG_WOKEN		0x02	/* NOTE(review): presumably set for wait_woken()/woken_wake_function() users — confirm */

struct __wait_queue {
	unsigned int		flags;		/* WQ_FLAG_* */
	void			*private;	/* typically the waiting task_struct (see init_waitqueue_entry) */
	wait_queue_func_t	func;		/* wakeup callback */
	struct list_head	task_list;	/* link on __wait_queue_head.task_list */
};
26
/*
 * Key identifying what a bit-waiter is waiting on: @flags points at the
 * word containing the bit, @bit_nr is the bit index — or
 * WAIT_ATOMIC_T_BIT_NR when waiting on an atomic_t rather than a bit.
 */
struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;	/* presumably used by the *_bit_timeout paths — confirm */
};

/* A bit-waiter: a plain waitqueue entry plus the key it waits for. */
struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

/* Wait queue head: spinlock protecting the list of queued waiters. */
struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
44
struct task_struct;

/*
 * Static initialisers / declarators for waitqueue entries, heads and
 * bit-wait keys.
 */

/* Waiter entry for task @tsk using the default wake function; not yet linked. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Empty head: task_list points back at itself. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Keys for bit / atomic_t waiters (see struct wait_bit_key). */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * init_waitqueue_head - runtime initialisation of a waitqueue head.
 * The static lock_class_key gives every invocation site its own lockdep
 * class; the stringified #q is used as the lock's name.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads are initialised at runtime so the lock
 * gets a proper class; without lockdep the static initialiser suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
89
90static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91{
92 q->flags = 0;
93 q->private = p;
94 q->func = default_wake_function;
95}
96
97static inline void
98init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99{
100 q->flags = 0;
101 q->private = NULL;
102 q->func = func;
103}
104
/*
 * waitqueue_active - return non-zero if any waiter is queued on @q.
 * NOTE(review): the list is inspected without taking q->lock; callers
 * presumably provide their own ordering — confirm before relying on it.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
109
/* Out-of-line add/remove variants (these handle locking themselves). */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

/* Link @new at the head of the queue; does no locking itself. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
118
119
120
121
/* Mark @wait exclusive and queue it at the head; no locking here. */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

/* Link @new at the tail of the queue; no locking here. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

/* Mark @wait exclusive and queue it at the tail; no locking here. */
static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/* Unlink @old from its queue; does no locking itself. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
147
/* Action callback invoked while waiting on a bit (e.g. bit_wait below). */
typedef int wait_bit_action_f(struct wait_bit_key *);

/* Out-of-line wakeup / bit-wait primitives. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
164
/*
 * Wakeup helpers.  The nr argument bounds how many exclusive waiters get
 * woken (the *_all variants pass 0, the plain forms pass 1).  The _locked
 * variants use the no-lock __wake_up_locked* primitives (NOTE(review):
 * caller presumably already holds the waitqueue lock — confirm), the _sync
 * forms use the __wake_up_sync* primitives, and the _poll forms forward
 * the poll mask @m as the wake key.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

/*
 * Condition check used by the *_timeout waiters: true when @condition
 * holds or the remaining timeout __ret (declared by the enclosing
 * ___wait_event) has reached 0.  If the condition turns true exactly as
 * the timeout expires, __ret is forced to 1 so the caller can still tell
 * success apart from timeout.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

/*
 * True when @state may be interrupted: any non-constant state, or the
 * constant TASK_INTERRUPTIBLE / TASK_KILLABLE.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * ___wait_event - core loop behind every wait_event*() macro.
 * @wq:        waitqueue to wait on
 * @condition: expression re-evaluated after each wakeup
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @exclusive: non-zero queues the waiter with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value of __ret (the timeout for *_timeout variants)
 * @cmd:       statement(s) that actually sleep (schedule(), ...)
 *
 * The statement expression evaluates to __ret.  When an interruptible
 * sleep is cut short, prepare_to_wait_event() returns a negative value
 * which becomes __ret; an exclusive waiter then tears down via
 * abort_exclusive_wait() and jumps past finish_wait().
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

/* Uninterruptible, non-exclusive wait; sleeps via schedule(). */
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
249
250
251
252
253
254
255
256
257
258
259
260
261
/*
 * wait_event - sleep (TASK_UNINTERRUPTIBLE) until @condition is true.
 * The fast path checks @condition before queuing anything.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

/* Like __wait_event() but sleeps via io_schedule() (I/O accounting). */
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/* wait_event() variant for waits that should count as I/O wait. */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
284
/* Interruptible wait that calls try_to_freeze() after every schedule(). */
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/*
 * wait_event_freezable - interruptible wait that cooperates with the
 * freezer.  Returns 0 when @condition became true, or the negative value
 * produced by an interrupted sleep.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
309
/*
 * Uninterruptible wait with a timeout: __ret starts at @timeout and is
 * refreshed by schedule_timeout() each round.
 */
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/*
 * wait_event_timeout - sleep until @condition is true or @timeout jiffies
 * elapse.  Returns 0 on timeout with the condition false, or the
 * remaining jiffies (at least 1) when the condition became true.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
342
/* Interruptible timeout wait that also calls try_to_freeze() each round. */
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * wait_event_freezable_timeout - like wait_event_timeout() but
 * interruptible and freezer-friendly; may also return a negative value
 * from an interrupted sleep.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
360
/* Uninterruptible wait running @cmd1 before and @cmd2 after schedule(). */
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/*
 * wait_event_cmd - sleep until @condition is true, executing @cmd1 /
 * @cmd2 around each schedule() (e.g. to drop and re-take a lock).
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
385
/* Interruptible, non-exclusive wait; sleeps via schedule(). */
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/*
 * wait_event_interruptible - sleep (TASK_INTERRUPTIBLE) until @condition
 * is true.  Returns 0 on success, or the negative value produced by an
 * interrupted sleep (via prepare_to_wait_event()).
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
413
/* Interruptible wait with a jiffies timeout tracked in __ret. */
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/*
 * wait_event_interruptible_timeout - sleep until @condition is true,
 * @timeout jiffies pass, or the sleep is interrupted.  Returns 0 on
 * timeout, remaining jiffies (>= 1) on success, or a negative value from
 * an interrupted sleep.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
448
/*
 * High-resolution-timer wait: arms an on-stack hrtimer sleeper (unless
 * @timeout is KTIME_MAX) and waits in @state.  The sleeper clears
 * __t.task when it fires, which the inner loop turns into -ETIME.
 * Evaluates to 0, -ETIME, or a negative value from an interrupted sleep.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/*
 * wait_event_hrtimeout - uninterruptible wait with an hrtimer timeout.
 * Returns 0 when @condition became true, -ETIME on timeout.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
/*
 * wait_event_interruptible_hrtimeout - interruptible wait with an hrtimer
 * timeout.  Returns 0 on success, -ETIME on timeout, or a negative value
 * from an interrupted sleep.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
525
/* Interruptible wait queued as an exclusive (WQ_FLAG_EXCLUSIVE) waiter. */
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

/*
 * wait_event_interruptible_exclusive - like wait_event_interruptible()
 * but the waiter is exclusive, so a wake-one wakeup targets it.
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})

/* Exclusive interruptible wait that calls try_to_freeze() each round. */
#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
			schedule(); try_to_freeze())

/* Freezer-friendly counterpart of wait_event_interruptible_exclusive(). */
#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
552
553
/*
 * Wait helper for callers who already hold wq.lock: drops the lock
 * around schedule() (with spin_unlock_irq/spin_lock_irq when @irq is
 * set, plain spin_unlock/spin_lock otherwise) and re-takes it before
 * re-checking @condition.  Queues at the tail, exclusively when
 * @exclusive is set.  Returns 0 once @condition is true, or
 * -ERESTARTSYS if a signal is pending.  @condition is only checked
 * after one pass, so the wait_event_*_locked() wrappers test it first.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
/*
 * wait_event_interruptible_locked* - interruptible waits for callers that
 * already hold wq.lock (see __wait_event_interruptible_locked()).
 * The _irq variants unlock/lock with IRQ toggling; the _exclusive
 * variants queue the waiter exclusively.  All return 0 when @condition
 * became true, -ERESTARTSYS on a pending signal.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
699
700
/* Wait in TASK_KILLABLE: only fatal signals interrupt the sleep. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/*
 * wait_event_killable - sleep until @condition is true or a fatal signal
 * arrives.  Returns 0 on success, or the negative value produced by an
 * interrupted sleep.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
727
728
/*
 * Uninterruptible wait entered with @lock held: the lock is dropped
 * (IRQs enabled) before @cmd and schedule(), and re-taken afterwards,
 * so @condition is always evaluated under @lock.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/*
 * wait_event_lock_irq_cmd - wait for @condition with @lock held, running
 * @cmd each time just after the lock is dropped.  @lock is held again on
 * return.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
/*
 * wait_event_lock_irq - wait_event_lock_irq_cmd() with no extra command;
 * @lock must be held on entry and is held again on return.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
792
793
/*
 * Interruptible variant of __wait_event_lock_irq(): @lock is dropped
 * (IRQs enabled) around @cmd and schedule(), re-taken before re-checking
 * @condition.
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/*
 * wait_event_interruptible_lock_irq_cmd - interruptible wait with @lock
 * held, running @cmd after each unlock.  Returns 0 on success or a
 * negative value from an interrupted sleep; @lock is held on return.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
/*
 * wait_event_interruptible_lock_irq - interruptible wait with @lock held
 * and no extra command.  Returns 0 on success or a negative value from an
 * interrupted sleep; @lock is held on return.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
865
/*
 * Interruptible, lock-holding wait with a jiffies timeout: @lock is
 * dropped (IRQs enabled) around schedule_timeout() and re-taken before
 * re-checking @condition; the remaining timeout is carried in __ret.
 *
 * Note: the stray trailing semicolon that used to terminate this macro
 * has been removed — unlike its siblings, the macro would otherwise
 * expand to two statements and misbehave in unbraced if/else bodies
 * (CERT PRE11-C).  The only effect is macro hygiene; expansion at the
 * existing call site is unchanged.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
/*
 * wait_event_interruptible_lock_irq_timeout - interruptible wait with
 * @lock held and a jiffies timeout.  Returns 0 on timeout, remaining
 * jiffies (>= 1) when @condition became true, or a negative value from
 * an interrupted sleep; @lock is held on return.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
907
908
909
910
/* Waitqueue lifecycle primitives and stock wakeup callbacks (out of line). */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

/* Declare an on-stack waiter for the current task with callback @function. */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Default on-stack waiter: dequeues itself on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
929
/* Declare an on-stack bit-waiter for @bit of @word, woken via wake_bit_function(). */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

/* Runtime (re)initialisation of a waiter for the current task. */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
948
949
/* Stock wait_bit_action_f implementations for the wait_on_bit*() helpers. */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971static inline int
972wait_on_bit(void *word, int bit, unsigned mode)
973{
974 might_sleep();
975 if (!test_bit(bit, word))
976 return 0;
977 return out_of_line_wait_on_bit(word, bit,
978 bit_wait,
979 mode);
980}
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996static inline int
997wait_on_bit_io(void *word, int bit, unsigned mode)
998{
999 might_sleep();
1000 if (!test_bit(bit, word))
1001 return 0;
1002 return out_of_line_wait_on_bit(word, bit,
1003 bit_wait_io,
1004 mode);
1005}
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022static inline int
1023wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
1024{
1025 might_sleep();
1026 if (!test_bit(bit, word))
1027 return 0;
1028 return out_of_line_wait_on_bit_timeout(word, bit,
1029 bit_wait_timeout,
1030 mode, timeout);
1031}
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049static inline int
1050wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
1051{
1052 might_sleep();
1053 if (!test_bit(bit, word))
1054 return 0;
1055 return out_of_line_wait_on_bit(word, bit, action, mode);
1056}
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077static inline int
1078wait_on_bit_lock(void *word, int bit, unsigned mode)
1079{
1080 might_sleep();
1081 if (!test_and_set_bit(bit, word))
1082 return 0;
1083 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1084}
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101static inline int
1102wait_on_bit_lock_io(void *word, int bit, unsigned mode)
1103{
1104 might_sleep();
1105 if (!test_and_set_bit(bit, word))
1106 return 0;
1107 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1108}
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127static inline int
1128wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
1129{
1130 might_sleep();
1131 if (!test_and_set_bit(bit, word))
1132 return 0;
1133 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1134}
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146static inline
1147int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1148{
1149 might_sleep();
1150 if (atomic_read(val) == 0)
1151 return 0;
1152 return out_of_line_wait_on_atomic_t(val, action, mode);
1153}
1154
1155#endif
1156