// SPDX-License-Identifier: GPL-2.0
/*
 *  Kernel internal timers
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/random.h>
#include <linux/sysctl.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#include "tick-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
 * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
 * level has a different granularity.
 *
 * The level granularity is:		LVL_CLK_DIV ^ level
 * The level clock frequency is:	HZ / (LVL_CLK_DIV ^ level)
 *
 * The array level of a newly armed timer depends on the relative expiry
 * time. The farther the expiry time is away, the higher the array level and
 * therefore the granularity becomes.
 *
 * Contrary to the original timer wheel implementation, which aims for 'exact'
 * expiry of the timers, this implementation removes the need for recascading
 * the timers into the lower array levels. The previous 'classic' timer wheel
 * implementation of the kernel already violated the 'exact' expiry by adding
 * slack to the expiry time to provide a service interval. The granularity
 * levels provide implicit batching.
 *
 * This is an optimization of the original timer wheel implementation for the
 * majority of the timer wheel use cases: timeouts. The vast majority of
 * timeout timers (networking, disk I/O ...) are canceled before expiry. If
 * the timeout expires it indicates that normal operation is disturbed, so it
 * does not matter much whether the timeout comes with a slight delay.
 *
 * The only exception to this are networking timers with a small expiry
 * time. They rely on the granularity. Those fit into the first wheel level,
 * which has HZ granularity.
 *
 * We don't have cascading anymore. Timers with an expiry time above the
 * capacity of the last wheel level are force expired at the maximum timeout
 * value of the last wheel level.
 *
 * The currently chosen array constants are a good compromise between array
 * size and granularity. This results in the following granularity and range
 * levels (shown here for HZ=1000; the tables for HZ 300/250/100 follow the
 * same pattern with correspondingly coarser tick lengths):
 *
 * Level Offset  Granularity              Range
 *  0      0          1 ms                 0 ms -         63 ms
 *  1     64          8 ms                64 ms -        511 ms
 *  2    128         64 ms               512 ms -       4095 ms (512ms - ~4s)
 *  3    192        512 ms              4096 ms -      32767 ms (~4s - ~32s)
 *  4    256       4096 ms (~4s)       32768 ms -     262143 ms (~32s - ~4m)
 *  5    320      32768 ms (~32s)     262144 ms -    2097151 ms (~4m - ~34m)
 *  6    384     262144 ms (~4m)     2097152 ms -   16777215 ms (~34m - ~4h)
 *  7    448    2097152 ms (~34m)   16777216 ms -  134217727 ms (~4h - ~1d)
 *  8    512   16777216 ms (~4h)   134217728 ms - 1073741823 ms (~1d - ~12d)
 */
/* Clock divisor for the next level */
#define LVL_CLK_SHIFT	3
#define LVL_CLK_DIV	(1UL << LVL_CLK_SHIFT)
#define LVL_CLK_MASK	(LVL_CLK_DIV - 1)
#define LVL_SHIFT(n)	((n) * LVL_CLK_SHIFT)
#define LVL_GRAN(n)	(1UL << LVL_SHIFT(n))

/*
 * The time start value for each level to select the bucket at enqueue
 * time. We start from the last possible delta of the previous level
 * so that we can later add an extra LVL_GRAN(n) to n (see calc_index()).
 */
#define LVL_START(n)	((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))

/* Size of each clock level */
#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_OFFS(n)	((n) * LVL_SIZE)

/* Level depth */
#if HZ > 100
# define LVL_DEPTH	9
# else
# define LVL_DEPTH	8
#endif

/* The cutoff (max. capacity of the wheel) */
#define WHEEL_TIMEOUT_CUTOFF	(LVL_START(LVL_DEPTH))
#define WHEEL_TIMEOUT_MAX	(WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))

/*
 * The resulting wheel size. If NOHZ is configured we allocate two
 * wheels so we have a separate storage for the deferrable timers.
 */
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)
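
/*
 * Worked example (illustrative, HZ=1000): LVL_GRAN(2) = 1 << 6 = 64
 * jiffies and LVL_START(2) = 63 << 3 = 504 jiffies, so a timer whose
 * delta is in [LVL_START(2), LVL_START(3)) = [504, 4032) lands in level 2
 * and expires with 64 ms granularity. The wheel capacity is
 * WHEEL_TIMEOUT_CUTOFF = LVL_START(LVL_DEPTH) = 63 << 24 = 1056964608
 * jiffies, i.e. roughly 12.2 days.
 */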

#ifdef CONFIG_NO_HZ_COMMON
# define NR_BASES	2
# define BASE_STD	0
# define BASE_DEF	1
#else
# define NR_BASES	1
# define BASE_STD	0
# define BASE_DEF	0
#endif

struct timer_base {
	raw_spinlock_t		lock;
	struct timer_list	*running_timer;
#ifdef CONFIG_PREEMPT_RT
	spinlock_t		expiry_lock;
	atomic_t		timer_waiters;
#endif
	unsigned long		clk;
	unsigned long		next_expiry;
	unsigned int		cpu;
	bool			next_expiry_recalc;
	bool			is_idle;
	bool			timers_pending;
	DECLARE_BITMAP(pending_map, WHEEL_SIZE);
	struct hlist_head	vectors[WHEEL_SIZE];
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);

#ifdef CONFIG_NO_HZ_COMMON

static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);

static void timer_update_keys(struct work_struct *work);
static DECLARE_WORK(timer_update_work, timer_update_keys);

#ifdef CONFIG_SMP
static unsigned int sysctl_timer_migration = 1;

DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);

static void timers_update_migration(void)
{
	if (sysctl_timer_migration && tick_nohz_active)
		static_branch_enable(&timers_migration_enabled);
	else
		static_branch_disable(&timers_migration_enabled);
}

#ifdef CONFIG_SYSCTL
static int timer_migration_handler(struct ctl_table *table, int write,
				   void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&timer_keys_mutex);
	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		timers_update_migration();
	mutex_unlock(&timer_keys_mutex);
	return ret;
}

static struct ctl_table timer_sysctl[] = {
	{
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= timer_migration_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init timer_sysctl_init(void)
{
	register_sysctl("kernel", timer_sysctl);
	return 0;
}
device_initcall(timer_sysctl_init);
#endif /* CONFIG_SYSCTL */
#else
static inline void timers_update_migration(void) { }
#endif /* !CONFIG_SMP */

static void timer_update_keys(struct work_struct *work)
{
	mutex_lock(&timer_keys_mutex);
	timers_update_migration();
	static_branch_enable(&timers_nohz_active);
	mutex_unlock(&timer_keys_mutex);
}

void timers_update_nohz(void)
{
	schedule_work(&timer_update_work);
}

static inline bool is_timers_nohz_active(void)
{
	return static_branch_unlikely(&timers_nohz_active);
}
#else
static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */

static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * 3*cpunr again to restore the original value.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffie is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	/*
	 * Make sure j is still in the future. Otherwise return the
	 * unmodified value.
	 */
	return time_is_after_jiffies(j) ? j : original;
}
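
/*
 * Worked example (illustrative, assumes HZ=1000 and cpu=0): j = 5020
 * gives rem = 20 < HZ/4, so the value rounds down to 5000; j = 5400
 * gives rem = 400 >= HZ/4, so it rounds up to 6000. With @force_up set,
 * 5020 would round up to 6000 as well.
 */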

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. The rationale and the
 * per-cpu skew are the same as for __round_jiffies() above.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds, for timers for which the
 * exact firing time does not matter too much. The rounding is skewed by
 * the current CPU number, see round_jiffies_common().
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds, skewed by the current CPU
 * number, see round_jiffies_common().
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
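
/*
 * Example (illustrative): a periodic, power-friendly status poll that is
 * happy to fire on a whole-second boundary; 'poll_timer' is a
 * hypothetical, already initialized timer_list:
 *
 *	mod_timer(&poll_timer, round_jiffies(jiffies + 10 * HZ));
 */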

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

static inline unsigned int timer_get_idx(struct timer_list *timer)
{
	return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
}

static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
{
	timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
			idx << TIMER_ARRAYSHIFT;
}

/*
 * Helper function to calculate the array index for a given expiry
 * time.
 */
static inline unsigned calc_index(unsigned long expires, unsigned lvl,
				  unsigned long *bucket_expiry)
{

	/*
	 * The timer wheel has to guarantee that a timer does not fire
	 * early. Early expiry can happen due to:
	 * - Timer is armed at the edge of a tick
	 * - Truncation of the expiry time in the outer wheel levels
	 *
	 * Round up with level granularity to prevent this.
	 */
	expires = (expires >> LVL_SHIFT(lvl)) + 1;
	*bucket_expiry = expires << LVL_SHIFT(lvl);
	return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

static int calc_wheel_index(unsigned long expires, unsigned long clk,
			    unsigned long *bucket_expiry)
{
	unsigned long delta = expires - clk;
	unsigned int idx;

	if (delta < LVL_START(1)) {
		idx = calc_index(expires, 0, bucket_expiry);
	} else if (delta < LVL_START(2)) {
		idx = calc_index(expires, 1, bucket_expiry);
	} else if (delta < LVL_START(3)) {
		idx = calc_index(expires, 2, bucket_expiry);
	} else if (delta < LVL_START(4)) {
		idx = calc_index(expires, 3, bucket_expiry);
	} else if (delta < LVL_START(5)) {
		idx = calc_index(expires, 4, bucket_expiry);
	} else if (delta < LVL_START(6)) {
		idx = calc_index(expires, 5, bucket_expiry);
	} else if (delta < LVL_START(7)) {
		idx = calc_index(expires, 6, bucket_expiry);
	} else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
		idx = calc_index(expires, 7, bucket_expiry);
	} else if ((long) delta < 0) {
		idx = clk & LVL_MASK;
		*bucket_expiry = clk;
	} else {
		/*
		 * Force expire obscene large timeouts to expire at the
		 * capacity limit of the wheel.
		 */
		if (delta >= WHEEL_TIMEOUT_CUTOFF)
			expires = clk + WHEEL_TIMEOUT_MAX;

		idx = calc_index(expires, LVL_DEPTH - 1, bucket_expiry);
	}
	return idx;
}
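
/*
 * Worked example (illustrative): with clk = 0 and expires = 100, delta
 * is 100, which is >= LVL_START(1) (63) and < LVL_START(2) (504), so
 * level 1 is used. calc_index() then computes (100 >> 3) + 1 = 13,
 * giving bucket_expiry = 13 << 3 = 104 and idx = LVL_OFFS(1) + 13 = 77,
 * i.e. the timer rounds up to the next multiple of LVL_GRAN(1) = 8.
 */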

static void
trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
{
	if (!is_timers_nohz_active())
		return;

	/*
	 * TODO: This wants some optimizing similar to the code below, but we
	 * will do that when we switch from push to pull for deferrable timers.
	 */
	if (timer->flags & TIMER_DEFERRABLE) {
		if (tick_nohz_full_cpu(base->cpu))
			wake_up_nohz_cpu(base->cpu);
		return;
	}

	/*
	 * We might have to IPI the remote CPU if the base is idle and the
	 * timer is not deferrable. If the other CPU is on the way to idle
	 * then it can't set base->is_idle as we hold the base lock:
	 */
	if (base->is_idle)
		wake_up_nohz_cpu(base->cpu);
}

/*
 * Enqueue the timer into the hash bucket, mark it pending in
 * the bitmap, store the index in the timer flags then wake up
 * the target CPU if needed.
 */
static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
			  unsigned int idx, unsigned long bucket_expiry)
{

	hlist_add_head(&timer->entry, base->vectors + idx);
	__set_bit(idx, base->pending_map);
	timer_set_idx(timer, idx);

	trace_timer_start(timer, timer->expires, timer->flags);

	/*
	 * Check whether this is the new first expiring timer. The
	 * effective expiry time of the timer is required here
	 * (bucket_expiry) instead of timer->expires.
	 */
	if (time_before(bucket_expiry, base->next_expiry)) {
		/*
		 * Set the next expiry time and kick the CPU so it
		 * can reevaluate the wheel:
		 */
		base->next_expiry = bucket_expiry;
		base->timers_pending = true;
		base->next_expiry_recalc = false;
		trigger_dyntick_cpu(base, timer);
	}
}

static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
	unsigned long bucket_expiry;
	unsigned int idx;

	idx = calc_wheel_index(timer->expires, base->clk, &bucket_expiry);
	enqueue_timer(base, timer, idx, bucket_expiry);
}

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr timer_debug_descr;

struct timer_hint {
	void	(*function)(struct timer_list *t);
	long	offset;
};

#define TIMER_HINT(fn, container, timr, hintfn)			\
	{							\
		.function = fn,					\
		.offset	  = offsetof(container, hintfn) -	\
			    offsetof(container, timr)		\
	}

static const struct timer_hint timer_hints[] = {
	TIMER_HINT(delayed_work_timer_fn,
		   struct delayed_work, timer, work.func),
	TIMER_HINT(kthread_delayed_work_timer_fn,
		   struct kthread_delayed_work, timer, work.func),
};

static void *timer_debug_hint(void *addr)
{
	struct timer_list *timer = addr;
	int i;

	for (i = 0; i < ARRAY_SIZE(timer_hints); i++) {
		if (timer_hints[i].function == timer->function) {
			void (**fn)(void) = addr + timer_hints[i].offset;

			return *fn;
		}
	}

	return timer->function;
}

static bool timer_is_static_object(void *addr)
{
	struct timer_list *timer = addr;

	return (timer->entry.pprev == NULL &&
		timer->entry.next == TIMER_ENTRY_STATIC);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(struct timer_list *unused)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninitialized object is found
 */
static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		timer_setup(timer, stub_timer, 0);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.is_static_object	= timer_is_static_object,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     void (*func)(struct timer_list *),
			     unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer,
			  void (*func)(struct timer_list *),
			  unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	timer->entry.pprev = NULL;
	timer->function = func;
	if (WARN_ON_ONCE(flags & ~TIMER_INIT_FLAGS))
		flags &= TIMER_INIT_FLAGS;
	timer->flags = flags | raw_smp_processor_id();
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @func: timer callback function
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    void (*func)(struct timer_list *), unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
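
/*
 * Example (illustrative): normal users initialize timers through the
 * timer_setup() wrapper rather than calling init_timer_key() directly;
 * 'struct foo' and 'foo_timeout' are hypothetical:
 *
 *	struct foo {
 *		struct timer_list timer;
 *	};
 *
 *	static void foo_timeout(struct timer_list *t)
 *	{
 *		struct foo *f = from_timer(f, t, timer);
 *		// handle the timeout for 'f'
 *	}
 *
 *	timer_setup(&f->timer, foo_timeout, 0);
 *	mod_timer(&f->timer, jiffies + HZ);
 */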

static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct hlist_node *entry = &timer->entry;

	debug_deactivate(timer);

	__hlist_del(entry);
	if (clear_pending)
		entry->pprev = NULL;
	entry->next = LIST_POISON2;
}

static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
			     bool clear_pending)
{
	unsigned idx = timer_get_idx(timer);

	if (!timer_pending(timer))
		return 0;

	if (hlist_is_singular_node(&timer->entry, base->vectors + idx)) {
		__clear_bit(idx, base->pending_map);
		base->next_expiry_recalc = true;
	}

	detach_timer(timer, clear_pending);
	return 1;
}

static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
{
	struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
	return base;
}

static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * If the timer is deferrable and NO_HZ_COMMON is set then we need
	 * to use the deferrable base.
	 */
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
		base = this_cpu_ptr(&timer_bases[BASE_DEF]);
	return base;
}

static inline struct timer_base *get_timer_base(u32 tflags)
{
	return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
}

static inline struct timer_base *
get_target_base(struct timer_base *base, unsigned tflags)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) &&
	    !(tflags & TIMER_PINNED))
		return get_timer_cpu_base(tflags, get_nohz_timer_target());
#endif
	return get_timer_this_cpu_base(tflags);
}

static inline void forward_timer_base(struct timer_base *base)
{
	unsigned long jnow = READ_ONCE(jiffies);

	/*
	 * No need to forward if we are close enough below jiffies.
	 * Also while executing timers, base->clk is 1 offset ahead
	 * of jiffies to avoid endless requeuing to current jiffies.
	 */
	if ((long)(jnow - base->clk) < 1)
		return;

	/*
	 * If the next expiry value is > jiffies, then we fast forward to
	 * jiffies otherwise we forward to the next expiry value.
	 */
	if (time_after(base->next_expiry, jnow)) {
		base->clk = jnow;
	} else {
		if (WARN_ON_ONCE(time_before(base->next_expiry, base->clk)))
			return;
		base->clk = base->next_expiry;
	}
}

/*
 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
 * that all timers which are tied to this base are locked, and the base
 * itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->vectors array.
 *
 * When a timer is migrating then the TIMER_MIGRATING flag is set and we
 * need to wait until that will be done.
 */
static struct timer_base *lock_timer_base(struct timer_list *timer,
					  unsigned long *flags)
	__acquires(timer->base->lock)
{
	for (;;) {
		struct timer_base *base;
		u32 tf;

		/*
		 * We need to use READ_ONCE() here, otherwise the compiler
		 * might re-read @tf between the check for TIMER_MIGRATING
		 * and spin_lock().
		 */
		tf = READ_ONCE(timer->flags);

		if (!(tf & TIMER_MIGRATING)) {
			base = get_timer_base(tf);
			raw_spin_lock_irqsave(&base->lock, *flags);
			if (timer->flags == tf)
				return base;
			raw_spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

#define MOD_TIMER_PENDING_ONLY		0x01
#define MOD_TIMER_REDUCE		0x02
#define MOD_TIMER_NOTPENDING		0x04

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires, unsigned int options)
{
	unsigned long clk = 0, flags, bucket_expiry;
	struct timer_base *base, *new_base;
	unsigned int idx = UINT_MAX;
	int ret = 0;

	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the networking code - if
	 * the timer is re-modified to have the same timeout or ends up in the
	 * same array bucket then just return:
	 */
	if (!(options & MOD_TIMER_NOTPENDING) && timer_pending(timer)) {
		/*
		 * The downside of this optimization is that it can result in
		 * larger granularity than you would get from adding a new
		 * timer with this expiry.
		 */
		long diff = timer->expires - expires;

		if (!diff)
			return 1;
		if (options & MOD_TIMER_REDUCE && diff <= 0)
			return 1;

		/*
		 * We lock timer base and calculate the bucket index right
		 * here. If the timer ends up in the same bucket, then we
		 * just update the expiry time and avoid the whole
		 * dequeue/enqueue dance.
		 */
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);

		if (timer_pending(timer) && (options & MOD_TIMER_REDUCE) &&
		    time_before_eq(timer->expires, expires)) {
			ret = 1;
			goto out_unlock;
		}

		clk = base->clk;
		idx = calc_wheel_index(expires, clk, &bucket_expiry);

		/*
		 * Retrieve and compare the array index of the pending
		 * timer. If it matches set the expiry to the new value so a
		 * subsequent call will exit in the expires check above.
		 */
		if (idx == timer_get_idx(timer)) {
			if (!(options & MOD_TIMER_REDUCE))
				timer->expires = expires;
			else if (time_after(timer->expires, expires))
				timer->expires = expires;
			ret = 1;
			goto out_unlock;
		}
	} else {
		base = lock_timer_base(timer, &flags);
		forward_timer_base(base);
	}

	ret = detach_if_pending(timer, base, false);
	if (!ret && (options & MOD_TIMER_PENDING_ONLY))
		goto out_unlock;

	new_base = get_target_base(base, timer->flags);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the new base.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler yet has not finished. This also guarantees that the
		 * timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->flags |= TIMER_MIGRATING;

			raw_spin_unlock(&base->lock);
			base = new_base;
			raw_spin_lock(&base->lock);
			WRITE_ONCE(timer->flags,
				   (timer->flags & ~TIMER_BASEMASK) | base->cpu);
			forward_timer_base(base);
		}
	}

	debug_timer_activate(timer);

	timer->expires = expires;
	/*
	 * If 'idx' was calculated above and the base time did not advance
	 * between calculating 'idx' and possibly switching the base, only
	 * enqueue_timer() is required. Otherwise we need to (re)calculate
	 * the wheel index via internal_add_timer().
	 */
	if (idx != UINT_MAX && clk == base->clk)
		enqueue_timer(base, timer, idx, bucket_expiry);
	else
		internal_add_timer(base, timer);

out_unlock:
	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not activate inactive timers.
 *
 * Return:
 * * %0 - The timer was inactive and not modified
 * * %1 - The timer was active and requeued to expire at @expires
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_PENDING_ONLY);
}
EXPORT_SYMBOL(mod_timer_pending);

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * Return:
 * * %0 - The timer was inactive and started
 * * %1 - The timer was active and requeued to expire at @expires or
 *	  the timer was active and not modified because @expires did
 *	  not change the effective expiry time
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_timer);
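
/*
 * Example (illustrative): pushing a watchdog timeout out on each
 * heartbeat; 'wd_timer' is a hypothetical, already set up timer_list:
 *
 *	mod_timer(&wd_timer, jiffies + 30 * HZ);
 *
 * Unlike add_timer(), this is safe whether or not the timer is already
 * pending, which is why it is the usual choice for re-arming.
 */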

/**
 * timer_reduce - Modify a timer's timeout if it would reduce the timeout
 * @timer: The timer to be modified
 * @expires: New timeout in jiffies
 *
 * timer_reduce() is very similar to mod_timer(), except that it will only
 * modify a pending timer if that would reduce the expiration time.
 *
 * Return:
 * * %0 - The timer was inactive and started
 * * %1 - The timer was active and either not modified or requeued
 */
int timer_reduce(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, MOD_TIMER_REDUCE);
}
EXPORT_SYMBOL(timer_reduce);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(@timer) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function fields must be set prior to calling
 * this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	__mod_timer(timer, timer->expires, MOD_TIMER_NOTPENDING);
}
EXPORT_SYMBOL(add_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct timer_base *new_base, *base;
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);

	new_base = get_timer_cpu_base(timer->flags, cpu);

	/*
	 * If @timer was on a different CPU, it must be migrated with the
	 * old base locked to prevent other operations proceeding with the
	 * wrong base locked.  See lock_timer_base().
	 */
	base = lock_timer_base(timer, &flags);
	if (base != new_base) {
		timer->flags |= TIMER_MIGRATING;

		raw_spin_unlock(&base->lock);
		base = new_base;
		raw_spin_lock(&base->lock);
		WRITE_ONCE(timer->flags,
			   (timer->flags & ~TIMER_BASEMASK) | cpu);
	}
	forward_timer_base(base);

	debug_timer_activate(timer);
	internal_add_timer(base, timer);
	raw_spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
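
/*
 * Example (illustrative): arming a per-cpu housekeeping timeout on a
 * specific CPU; 'hk_timer' is a hypothetical, initialized timer_list
 * (typically set up with the TIMER_PINNED flag so later mod_timer()
 * calls keep it on that CPU):
 *
 *	hk_timer.expires = jiffies + HZ;
 *	add_timer_on(&hk_timer, cpu);
 */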

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * Return:
 * * %0 - The timer was not pending
 * * %1 - The timer was pending and deactivated
 */
int del_timer(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		raw_spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: Timer to deactivate
 *
 * This function tries to deactivate a timer. On success the timer is not
 * queued and the timer callback function is not running on any CPU.
 *
 * Return:
 * * %0  - The timer was not pending
 * * %1  - The timer was pending and deactivated
 * * %-1 - The timer callback function is running on a different CPU
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct timer_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer)
		ret = detach_if_pending(timer, base, true);

	raw_spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_PREEMPT_RT
static __init void timer_base_init_expiry_lock(struct timer_base *base)
{
	spin_lock_init(&base->expiry_lock);
}

static inline void timer_base_lock_expiry(struct timer_base *base)
{
	spin_lock(&base->expiry_lock);
}

static inline void timer_base_unlock_expiry(struct timer_base *base)
{
	spin_unlock(&base->expiry_lock);
}

/*
 * The counterpart to del_timer_wait_running().
 *
 * If there is a waiter for base->expiry_lock, then it was waiting for the
 * timer callback to finish. Drop expiry_lock and reacquire it. That allows
 * the waiter to acquire the lock and make progress.
 */
static void timer_sync_wait_running(struct timer_base *base)
{
	if (atomic_read(&base->timer_waiters)) {
		raw_spin_unlock_irq(&base->lock);
		spin_unlock(&base->expiry_lock);
		spin_lock(&base->expiry_lock);
		raw_spin_lock_irq(&base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion, if the softirq thread on a remote CPU
 * got preempted, and it also prevents a life lock when the task which
 * tries to delete a timer preempted the softirq thread running the timer
 * callback function.
 */
static void del_timer_wait_running(struct timer_list *timer)
{
	u32 tf;

	tf = READ_ONCE(timer->flags);
	if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
		struct timer_base *base = get_timer_base(tf);

		/*
		 * Mark the base as contended and grab the expiry lock,
		 * which is held by the softirq across the timer
		 * callback. Drop the lock immediately so the softirq can
		 * expire the next timer. In theory the timer could already
		 * be running again, but that's more than unlikely and
		 * just causes another wait loop.
		 */
		atomic_inc(&base->timer_waiters);
		spin_lock_bh(&base->expiry_lock);
		atomic_dec(&base->timer_waiters);
		spin_unlock_bh(&base->expiry_lock);
	}
}
#else
static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
static inline void timer_base_lock_expiry(struct timer_base *base) { }
static inline void timer_base_unlock_expiry(struct timer_base *base) { }
static inline void timer_sync_wait_running(struct timer_base *base) { }
static inline void del_timer_wait_running(struct timer_list *timer) { }
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question. Here's why::
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                        spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 *    Now del_timer_sync() will never return and never release somelock.
 *    The interrupt on the other CPU is waiting to grab somelock but
 *    it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * Return:
 * * %0	- The timer was not pending
 * * %1	- The timer was pending and deactivated
 */
int del_timer_sync(struct timer_list *timer)
{
	int ret;

#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));

	/*
	 * Must be able to sleep on PREEMPT_RT because of the slowpath in
	 * del_timer_wait_running().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
		lockdep_assert_preemption_enabled();

	do {
		ret = try_to_del_timer_sync(timer);

		if (unlikely(ret < 0)) {
			del_timer_wait_running(timer);
			cpu_relax();
		}
	} while (ret < 0);

	return ret;
}
EXPORT_SYMBOL(del_timer_sync);
#endif
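
/*
 * Example (illustrative): the canonical teardown pattern for code that
 * arms a self-rearming timer; 'foo' is a hypothetical struct with an
 * armed timer and a 'shutting_down' flag checked by its callback:
 *
 *	static void foo_shutdown(struct foo *f)
 *	{
 *		f->shutting_down = true;	// callback stops rearming
 *		del_timer_sync(&f->timer);	// wait for a running callback
 *	}
 */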

static void call_timer_fn(struct timer_list *timer,
			  void (*fn)(struct timer_list *),
			  unsigned long baseclk)
{
	int count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer, baseclk);
	fn(timer);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (count != preempt_count()) {
		WARN_ONCE(1, "timer: %pS preempt leak: %08x -> %08x\n",
			  fn, count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count_set(count);
	}
}

static void expire_timers(struct timer_base *base, struct hlist_head *head)
{
	/*
	 * This value is required only for tracing. base->clk was
	 * incremented directly before expire_timers was called. But expiry
	 * is related to the old base->clk value.
	 */
	unsigned long baseclk = base->clk - 1;

	while (!hlist_empty(head)) {
		struct timer_list *timer;
		void (*fn)(struct timer_list *);

		timer = hlist_entry(head->first, struct timer_list, entry);

		base->running_timer = timer;
		detach_timer(timer, true);

		fn = timer->function;

		if (timer->flags & TIMER_IRQSAFE) {
			raw_spin_unlock(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			raw_spin_lock(&base->lock);
			base->running_timer = NULL;
		} else {
			raw_spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, baseclk);
			raw_spin_lock_irq(&base->lock);
			base->running_timer = NULL;
			timer_sync_wait_running(base);
		}
	}
}

static int collect_expired_timers(struct timer_base *base,
				  struct hlist_head *heads)
{
	unsigned long clk = base->clk = base->next_expiry;
	struct hlist_head *vec;
	int i, levels = 0;
	unsigned int idx;

	for (i = 0; i < LVL_DEPTH; i++) {
		idx = (clk & LVL_MASK) + i * LVL_SIZE;

		if (__test_and_clear_bit(idx, base->pending_map)) {
			vec = base->vectors + idx;
			hlist_move_list(vec, heads++);
			levels++;
		}
		/* Is it time to look at the next level? */
		if (clk & LVL_CLK_MASK)
			break;
		/* Shift clock for the next level granularity */
		clk >>= LVL_CLK_SHIFT;
	}
	return levels;
}

/*
 * Find the next pending bucket of a level. Search from level start (@offset)
 * + @clk upwards and if nothing there, search from start of the level
 * (@offset) up to @offset + clk.
 */
static int next_pending_bucket(struct timer_base *base, unsigned offset,
			       unsigned clk)
{
	unsigned pos, start = offset + clk;
	unsigned end = offset + LVL_SIZE;

	pos = find_next_bit(base->pending_map, end, start);
	if (pos < end)
		return pos - start;

	pos = find_next_bit(base->pending_map, start, offset);
	return pos < start ? pos + LVL_SIZE - start : -1;
}

/*
 * Search the first expiring timer in the various clock levels. Caller must
 * hold base->lock.
 */
static unsigned long __next_timer_interrupt(struct timer_base *base)
{
	unsigned long clk, next, adj;
	unsigned lvl, offset = 0;

	next = base->clk + NEXT_TIMER_MAX_DELTA;
	clk = base->clk;
	for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
		int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
		unsigned long lvl_clk = clk & LVL_CLK_MASK;

		if (pos >= 0) {
			unsigned long tmp = clk + (unsigned long) pos;

			tmp <<= LVL_SHIFT(lvl);
			if (time_before(tmp, next))
				next = tmp;

			/*
			 * If the next expiration happens before we reach
			 * the next level, no need to check further.
			 */
			if (pos <= ((LVL_CLK_DIV - lvl_clk) & LVL_CLK_MASK))
				break;
		}
		/*
		 * Clock for the next level. If the current level clock lower
		 * bits are zero, we look at the next level as is. If not we
		 * need to advance it by one because that's going to be the
		 * next expiring bucket in that level. base->clk is the next
		 * expiring jiffie. So in case of:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    0
		 *
		 * we have to look at all levels @index 0. With:
		 *
		 * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
		 *  0    0    0    0    0    2
		 *
		 * LVL0 has the next expiring bucket @index 2. The upper
		 * levels have the next expiring bucket @index 1.
		 *
		 * In case that the propagation wraps the next level the
		 * same rules apply, i.e. the carry propagates into the
		 * upper level clock.
		 */
		adj = lvl_clk ? 1 : 0;
		clk >>= LVL_CLK_SHIFT;
		clk += adj;
	}

	base->next_expiry_recalc = false;
	base->timers_pending = !(next == base->clk + NEXT_TIMER_MAX_DELTA);

	return next;
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
{
	u64 nextevt = hrtimer_get_next_event();

	/*
	 * If high resolution timers are enabled
	 * hrtimer_get_next_event() returns KTIME_MAX.
	 */
	if (expires <= nextevt)
		return expires;

	/*
	 * If the next timer is already expired, return the tick base
	 * time so the tick is fired immediately.
	 */
	if (nextevt <= basem)
		return basem;

	/*
	 * Round up to the next jiffie. High resolution timers are
	 * off, so the hrtimers are expired in the tick and we need to
	 * make sure that this tick really expires the timer to avoid
	 * a ping pong of the nohz stop code.
	 *
	 * Use DIV_ROUND_UP_ULL to prevent gcc calling __divdi3
	 */
	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
}

/**
 * get_next_timer_interrupt - return the time (clock mono) of the next timer
 * @basej:	base time jiffies
 * @basem:	base time clock monotonic
 *
 * Returns the tick aligned clock monotonic time of the next pending
 * timer or KTIME_MAX if no timer is pending.
 */
u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
	u64 expires = KTIME_MAX;
	unsigned long nextevt;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	raw_spin_lock(&base->lock);
	if (base->next_expiry_recalc)
		base->next_expiry = __next_timer_interrupt(base);
	nextevt = base->next_expiry;

	/*
	 * We have a fresh next event. Check whether we can forward the
	 * base. We can only do that when @basej is past base->clk
	 * otherwise we might rewind base->clk.
	 */
	if (time_after(basej, base->clk)) {
		if (time_after(nextevt, basej))
			base->clk = basej;
		else if (time_after(nextevt, base->clk))
			base->clk = nextevt;
	}

	if (time_before_eq(nextevt, basej)) {
		expires = basem;
		base->is_idle = false;
	} else {
		if (base->timers_pending)
			expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
		/*
		 * If we expect to sleep more than a tick, mark the base idle.
		 * Also the tick is stopped so any added timer must forward
		 * the base clk itself to keep granularity small. This idle
		 * logic is only maintained for the BASE_STD base, deferrable
		 * timers may still see large granularity skew (by design).
		 */
		if ((expires - basem) > TICK_NSEC)
			base->is_idle = true;
	}
	raw_spin_unlock(&base->lock);

	return cmp_next_hrtimer_event(basem, expires);
}

/**
 * timer_clear_idle - Clear the idle state of the timer base
 *
 * Called with interrupts disabled
 */
void timer_clear_idle(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	/*
	 * We do this unlocked. The worst outcome is a remote enqueue sending
	 * a pointless IPI, but taking the lock would just make the window for
	 * sending the IPI a few instructions smaller for the cost of taking
	 * the lock in the exit from idle path.
	 */
	base->is_idle = false;
}
#endif

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 */
static inline void __run_timers(struct timer_base *base)
{
	struct hlist_head heads[LVL_DEPTH];
	int levels;

	if (time_before(jiffies, base->next_expiry))
		return;

	timer_base_lock_expiry(base);
	raw_spin_lock_irq(&base->lock);

	while (time_after_eq(jiffies, base->clk) &&
	       time_after_eq(jiffies, base->next_expiry)) {
		levels = collect_expired_timers(base, heads);
		/*
		 * The two possible reasons for not finding any expired
		 * timer at this clk are that all matching timers have been
		 * dequeued or no timer has been queued since
		 * base::next_expiry was set to base::clk +
		 * NEXT_TIMER_MAX_DELTA.
		 */
		WARN_ON_ONCE(!levels && !base->next_expiry_recalc
			     && base->timers_pending);
		base->clk++;
		base->next_expiry = __next_timer_interrupt(base);

		while (levels--)
			expire_timers(base, heads + levels);
	}
	raw_spin_unlock_irq(&base->lock);
	timer_base_unlock_expiry(base);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	__run_timers(base);
	if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
static void run_local_timers(void)
{
	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

	hrtimer_run_queues();

	/* Raise the softirq only if required. */
	if (time_before(jiffies, base->next_expiry)) {
		if (!IS_ENABLED(CONFIG_NO_HZ_COMMON))
			return;
		/* CPU is awake, so check the deferrable base. */
		base++;
		if (time_before(jiffies, base->next_expiry))
			return;
	}
	raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_sched_clock_irq(user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_tick();
#endif
	scheduler_tick();
	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
		run_posix_cpu_timers();
}

/*
 * Since schedule_timeout()'s timer is defined on the stack, it must store
 * the target task on the stack as well.
 */
struct process_timer {
	struct timer_list timer;
	struct task_struct *task;
};

static void process_timeout(struct timer_list *t)
{
	struct process_timer *timeout = from_timer(timeout, t, timer);

	wake_up_process(timeout->task);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have elapsed.
 * The function behavior depends on the current task state
 * (see also set_current_state() description):
 *
 * %TASK_RUNNING - the scheduler is called, but the task does not sleep
 * at all. That happens because sched_submit_work() does nothing for
 * tasks in %TASK_RUNNING state.
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be %TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * Returns 0 when the timer has expired otherwise the remaining time in
 * jiffies will be returned. In all cases the return value is guaranteed
 * to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct process_timer timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative value
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it want with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something is gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			__set_current_state(TASK_RUNNING);
			goto out;
		}
	}

	expire = timeout + jiffies;

	timer.task = current;
	timer_setup_on_stack(&timer.timer, process_timeout, 0);
	__mod_timer(&timer.timer, expire, MOD_TIMER_NOTPENDING);
	schedule();
	del_singleshot_timer_sync(&timer.timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer.timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
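
/*
 * Example (illustrative): wait up to one second, returning early if the
 * task is explicitly woken or signalled:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	// remaining == 0: the full second elapsed; > 0: woken early.
 */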

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/*
 * Like schedule_timeout_uninterruptible(), except this task will not
 * contribute to load average.
 */
signed long __sched schedule_timeout_idle(signed long timeout)
{
	__set_current_state(TASK_IDLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_idle);

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
{
	struct timer_list *timer;
	int cpu = new_base->cpu;

	while (!hlist_empty(head)) {
		timer = hlist_entry(head->first, struct timer_list, entry);
		detach_timer(timer, false);
		timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
		internal_add_timer(new_base, timer);
	}
}

int timers_prepare_cpu(unsigned int cpu)
{
	struct timer_base *base;
	int b;

	for (b = 0; b < NR_BASES; b++) {
		base = per_cpu_ptr(&timer_bases[b], cpu);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		base->next_expiry_recalc = false;
		base->timers_pending = false;
		base->is_idle = false;
	}
	return 0;
}

int timers_dead_cpu(unsigned int cpu)
{
	struct timer_base *old_base;
	struct timer_base *new_base;
	int b, i;

	BUG_ON(cpu_online(cpu));

	for (b = 0; b < NR_BASES; b++) {
		old_base = per_cpu_ptr(&timer_bases[b], cpu);
		new_base = get_cpu_ptr(&timer_bases[b]);
		/*
		 * The caller is globally serialized and nobody else
		 * takes two locks at once, deadlock is not possible.
		 */
		raw_spin_lock_irq(&new_base->lock);
		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

		/*
		 * The current CPUs base clock might be stale. Update it
		 * before moving the timers over.
		 */
		forward_timer_base(new_base);

		BUG_ON(old_base->running_timer);

		for (i = 0; i < WHEEL_SIZE; i++)
			migrate_timer_list(new_base, old_base->vectors + i);

		raw_spin_unlock(&old_base->lock);
		raw_spin_unlock_irq(&new_base->lock);
		put_cpu_ptr(&timer_bases);
	}
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

static void __init init_timer_cpu(int cpu)
{
	struct timer_base *base;
	int i;

	for (i = 0; i < NR_BASES; i++) {
		base = per_cpu_ptr(&timer_bases[i], cpu);
		base->cpu = cpu;
		raw_spin_lock_init(&base->lock);
		base->clk = jiffies;
		base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
		timer_base_init_expiry_lock(base);
	}
}

static void __init init_timer_cpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_timer_cpu(cpu);
}

void __init init_timers(void)
{
	init_timer_cpus();
	posix_cputimers_init_work();
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
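
/*
 * Example (illustrative): sleep for up to 100ms but bail out early on a
 * signal; the return value is the time left in milliseconds when
 * interrupted, 0 when the full delay elapsed:
 *
 *	unsigned long left = msleep_interruptible(100);
 *	if (left)
 *		return -EINTR;	// hypothetical caller policy
 */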

/**
 * usleep_range_state - Sleep for an approximate time in a given state
 * @min:	Minimum time in usecs to sleep
 * @max:	Maximum time in usecs to sleep
 * @state:	State of the current task that will be while sleeping
 *
 * In non-atomic context where the exact wakeup time is flexible, use
 * usleep_range_state() instead of udelay(). The sleep improves
 * responsiveness by avoiding the CPU-hogging busy-wait of udelay(), and
 * the range reduces power usage by allowing hrtimers to take advantage
 * of an already scheduled interrupt instead of scheduling a new one just
 * for this sleep.
 */
void __sched usleep_range_state(unsigned long min, unsigned long max,
				unsigned int state)
{
	ktime_t exp = ktime_add_us(ktime_get(), min);
	u64 delta = (u64)(max - min) * NSEC_PER_USEC;

	for (;;) {
		__set_current_state(state);
		/* Do not return before the requested sleep time has elapsed */
		if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
			break;
	}
}
EXPORT_SYMBOL(usleep_range_state);
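
/*
 * Example (illustrative): a roughly 1-2ms uninterruptible delay in
 * process context, via the common usleep_range() wrapper that is built
 * on top of usleep_range_state():
 *
 *	usleep_range(1000, 2000);
 */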
2137