// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API, aka timer wheel,
 *  hrtimers provide finer resolution and accuracy depending on system
 *  configuration and capabilities.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	Based on the original timer wheel code
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard context timers from
 * clock_base_active
 */
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * migration_base is a dummy clock base used as a placeholder for
 * timer->base while a timer is in flight between CPUs. It must be a
 * fully initialized clock base so that its seq and cpu_base members
 * can be dereferenced safely; lock_hrtimer_base() recognizes it and
 * spins until the timer has reached its new base.
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { {
		.cpu_base = &migration_cpu_base,
		.seq      = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
						     &migration_cpu_base.lock),
	}, },
};

#define migration_base	migration_cpu_base.clock_base[0]

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return base == &migration_base;
}

/*
 * We are using hashed locking: Holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = &migration_base and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled case similar.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
	ktime_t expires;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires < new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		WRITE_ONCE(timer->base, &migration_base);
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			WRITE_ONCE(timer->base, base);
			goto again;
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return false;
}

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (u32) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG < 64 */

/*
 * ktime_add_safe - Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static const struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode);

void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);

#else

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer,
				  enum hrtimer_mode mode)
{
	debug_hrtimer_activate(timer, mode);
	trace_hrtimer_start(timer, mode);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __ffs(*active);
	*active &= ~(1U << idx);

	return &cpu_base->clock_base[idx];
}

#define for_each_active_base(base, cpu_base, active)	\
	while ((base = __next_base((cpu_base), &(active))))

static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
					 const struct hrtimer *exclude,
					 unsigned int active,
					 ktime_t expires_next)
{
	struct hrtimer_clock_base *base;
	ktime_t expires;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		if (timer == exclude) {
			/* Get to the next timer in the queue. */
			next = timerqueue_iterate_next(next);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
		}
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;

			/* Skip cpu_base update if a timer is being excluded. */
			if (exclude)
				continue;

			if (timer->is_soft)
				cpu_base->softirq_next_timer = timer;
			else
				cpu_base->next_timer = timer;
		}
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}

/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next,
 * but does not set cpu_base::*expires_next. That is done by
 * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
 * cpu_base::*expires_next right away, reprogramming logic would no longer
 * work.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases;
 * those timers will get run whenever the softirq gets handled, at the end
 * of hrtimer_run_softirq(), and hrtimer_update_softirq_timer() will re-add
 * these bases.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
	unsigned int active;
	struct hrtimer *next_timer = NULL;
	ktime_t expires_next = KTIME_MAX;

	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
		cpu_base->softirq_next_timer = NULL;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
							 active, KTIME_MAX);

		next_timer = cpu_base->softirq_next_timer;
	}

	if (active_mask & HRTIMER_ACTIVE_HARD) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		cpu_base->next_timer = next_timer;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
							 expires_next);
	}

	return expires_next;
}

static ktime_t hrtimer_update_next_event(struct hrtimer_cpu_base *cpu_base)
{
	ktime_t expires_next, soft = KTIME_MAX;

	/*
	 * If the soft interrupt has already been activated, ignore the
	 * soft bases. They will be handled in the already raised soft
	 * interrupt.
	 */
	if (!cpu_base->softirq_activated) {
		soft = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
		/*
		 * Update the soft expiry time. clock_settime() might have
		 * changed it.
		 */
		cpu_base->softirq_expires_next = soft;
	}

	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
	/*
	 * If a softirq timer is expiring first, update cpu_base->next_timer
	 * and program the hardware with the soft expiry time.
	 */
	if (expires_next > soft) {
		cpu_base->next_timer = cpu_base->softirq_next_timer;
		expires_next = soft;
	}

	return expires_next;
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
						   offs_real, offs_boot, offs_tai);

	base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
	base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
	base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

	return now;
}

/*
 * Is the high resolution mode active ?
 */
static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
{
	return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
		cpu_base->hres_active : 0;
}

static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	expires_next = hrtimer_update_next_event(cpu_base);

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static bool hrtimer_hres_enabled __read_mostly = true;
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!__hrtimer_hres_active(base))
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		pr_warn("Could not switch to high resolution mode on CPU %u\n",
			base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base = timer->base;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (timer->is_soft) {
		/*
		 * soft hrtimer could be started on a remote CPU. In this
		 * case softirq_expires_next needs to be updated on the
		 * remote CPU. The soft hrtimer will not expire before the
		 * first hard hrtimer on the remote CPU -
		 * hrtimer_check_target() prevents this case.
		 */
		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

		if (timer_cpu_base->softirq_activated)
			return;

		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
			return;

		timer_cpu_base->softirq_next_timer = timer;
		timer_cpu_base->softirq_expires_next = expires;

		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
		    !reprogram)
			return;
	}

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need an extra check for a running
	 * callback.
	 */
	if (cpu_base->in_hrtirq)
		return;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;
	cpu_base->expires_next = expires;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	tick_program_event(expires, 1);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs.  However, all other CPUs will be
 * stopped with IRQs interrupts disabled so the clock_was_set() call
 * can be deferred:
 */
void hrtimers_resume(void)
{
	lockdep_assert_irqs_disabled();
	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
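
/*
 * Illustrative usage sketch (editorial addition, not part of this file):
 * a periodic callback typically pushes its own expiry past "now" and
 * restarts itself. The hrtimer_forward_now() helper wraps
 * hrtimer_forward() with the current time of the timer's base. The
 * callback and work function names below are hypothetical:
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
 *	{
 *		do_periodic_work();			// placeholder work
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */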

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base,
			   enum hrtimer_mode mode)
{
	debug_activate(timer, mode);

	base->cpu_base->active_bases |= 1 << base->index;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);

	return timerqueue_add(&base->active, &timer->node);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, newstate);
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm. We keep the next event time
	 * in the hardware and at worst take a spurious event on that
	 * CPU, which then reevaluates its queues.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	u8 state = timer->state;

	if (state & HRTIMER_STATE_ENQUEUED) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffie) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}

static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
	ktime_t expires;

	/*
	 * Find the soft timer which expires first.
	 */
	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);

	/*
	 * If there is no soft hrtimer queued, there is nothing which
	 * could require a reprogram.
	 */
	if (expires == KTIME_MAX)
		return;

	/*
	 * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
	 * cpu_base->*expires_next is only set by hrtimer_reprogram()
	 */
	hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}

static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				    u64 delta_ns, const enum hrtimer_mode mode,
				    struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	return enqueue_hrtimer(timer, new_base, mode);
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;

	/*
	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
	 * expiry mode because unmarked timers are moved to softirq expiry.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
	else
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);

	base = lock_hrtimer_base(timer, &flags);

	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
		hrtimer_reprogram(timer, true);

	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
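
/*
 * Illustrative sketch (editorial addition, not part of this file):
 * hrtimer_start() is a thin wrapper around hrtimer_start_range_ns() with
 * zero slack. A caller that can tolerate coalesced wakeups passes a
 * non-zero @delta_ns instead. Values below are hypothetical:
 *
 *	// fire between 5ms and 5ms + 500us from now
 *	hrtimer_start_range_ns(&timer, ms_to_ktime(5),
 *			       500 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */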

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here.  The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

#ifdef CONFIG_PREEMPT_RT
static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
{
	spin_lock_init(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
{
	spin_lock(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
{
	spin_unlock(&base->softirq_expiry_lock);
}

/*
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
 * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
				      unsigned long flags)
{
	if (atomic_read(&cpu_base->timer_waiters)) {
		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
		spin_unlock(&cpu_base->softirq_expiry_lock);
		spin_lock(&cpu_base->softirq_expiry_lock);
		raw_spin_lock_irq(&cpu_base->lock);
	}
}

/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion: if the soft interrupt thread is
 * preempted in the middle of a timer callback, then calling
 * hrtimer_cancel() can lead to two issues:
 *
 *  - If the caller is on a remote CPU then it has to spin wait for the
 *    timer handler to complete. This can result in unbound priority
 *    inversion.
 *
 *  - If the caller originates from the task which preempted the timer
 *    handler on the same CPU, then spin waiting for the timer handler to
 *    complete is never going to end.
 */
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{
	/* Lockless read. Prevent the compiler from reloading it below */
	struct hrtimer_clock_base *base = READ_ONCE(timer->base);

	/*
	 * Just relax if the timer expires in hard interrupt context or if
	 * it is currently on the migration base.
	 */
	if (!timer->is_soft || is_migration_base(base)) {
		cpu_relax();
		return;
	}

	/*
	 * Mark the base as contended and grab the expiry lock, which is
	 * held by the softirq across the timer callback. Drop the lock
	 * immediately so the softirq can expire the next timer. In theory
	 * the timer could already be running again, but that's more than
	 * unlikely and just causes another wait loop.
	 */
	atomic_inc(&base->cpu_base->timer_waiters);
	spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
	atomic_dec(&base->cpu_base->timer_waiters);
	spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
}
#else
static inline void
hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
					     unsigned long flags) { }
#endif

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);

		if (ret < 0)
			hrtimer_cancel_wait_running(timer);
	} while (ret < 0);
	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
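
/*
 * Illustrative sketch (editorial addition, not part of this file):
 * because hrtimer_cancel() waits for a running callback to finish, it is
 * the safe teardown primitive before freeing the enclosing object. The
 * structure and field names below are hypothetical:
 *
 *	hrtimer_cancel(&dev->timer);	// callback cannot run afterwards
 *	kfree(dev);			// now safe to free the container
 */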

/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}

/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:	timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (__hrtimer_hres_active(cpu_base)) {
		unsigned int active;

		if (!cpu_base->softirq_activated) {
			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
			expires = __hrtimer_next_event_base(cpu_base, exclude,
							    active, KTIME_MAX);
		}
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
						    expires);
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	if (likely(clock_id < MAX_CLOCKS)) {
		int base = hrtimer_clock_to_base_table[clock_id];

		if (likely(base != HRTIMER_MAX_CLOCK_BASES))
			return base;
	}
	WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
	return HRTIMER_BASE_MONOTONIC;
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
	struct hrtimer_cpu_base *cpu_base;
	int base;

	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
		softtimer = true;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they needs to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
		clock_id = CLOCK_MONOTONIC;

	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
	base += hrtimer_clockid_to_base(clock_id);
	timer->is_soft = softtimer;
	timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	The modes which are relevant for initialization:
 *		HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *		HRTIMER_MODE_REL_SOFT
 *
 *		The PINNED variants of the above can be handed in,
 *		but the PINNED bit is ignored as pinning happens
 *		when the hrtimer is started
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
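
/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * usual pattern is hrtimer_init() followed by setting ->function before
 * the first start. The device structure and callback are hypothetical:
 *
 *	hrtimer_init(&dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	dev->timer.function = my_periodic_cb;
 *	hrtimer_start(&dev->timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */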

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being ran
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now,
			  unsigned long flags) __must_hold(&cpu_base->lock)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	bool expires_in_hardirq;
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If its not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	trace_hrtimer_expire_entry(timer, now);
	expires_in_hardirq = lockdep_hrtimer_enter(timer);

	restart = fn(timer);

	lockdep_hrtimer_exit(expires_in_hardirq);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock_irq(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Reprogramming the
	 * event hardware is done in the hrtimer interrupt or in the
	 * softirq.
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running.timer == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
				 unsigned long flags, unsigned int active_mask)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases & active_mask;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *node;
		ktime_t basenow;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
			if (active_mask == HRTIMER_ACTIVE_SOFT)
				hrtimer_sync_wait_running(cpu_base, flags);
		}
	}
}

static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	hrtimer_cpu_base_lock_expiry(cpu_base);
	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

	cpu_base->softirq_activated = 0;
	hrtimer_update_softirq_timer(cpu_base, true);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	hrtimer_cpu_base_unlock_expiry(cpu_base);
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	unsigned long flags;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

	/* Reevaluate the clock bases for the [soft] next expiry */
	expires_next = hrtimer_update_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the next expiry value.
	 */
	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}

/* called with interrupts disabled */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = this_cpu_ptr(&tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);

	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

/**
 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
 * @sl:		sleeper to be started
 * @mode:	timer mode abs/rel
 *
 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
 */
void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
				   enum hrtimer_mode mode)
{
	/*
	 * Make the enqueue delivery mode check work on RT. If the sleeper
	 * was initialized for hard interrupt delivery, force the mode bit.
	 * This is a special case for hrtimer_sleepers because
	 * hrtimer_init_sleeper() determines the delivery mode on RT, but the
	 * fiddling with this decision is avoided at the call sites.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
		mode |= HRTIMER_MODE_HARD;

	hrtimer_start_expires(&sl->timer, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);

static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context either for latency reasons or because the
	 * hrtimer callback takes regular spinlocks or invokes other
	 * functions which are not suitable for hard interrupt context on
	 * PREEMPT_RT.
	 *
	 * The hrtimer_sleeper callback is RT compatible in hard interrupt
	 * context, but there is a latency concern: Untrusted userspace can
	 * spawn many threads which arm timers for the same expiry time on
	 * the same CPU. That causes a latency spike due to the wakeup of
	 * a gazillion threads.
	 *
	 * OTOH, privileged real-time user space applications rely on the
	 * low latency of hard interrupt wakeups. If the current task is in
	 * a real-time scheduling class, mark the mode for hard interrupt
	 * expiry.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
			mode |= HRTIMER_MODE_HARD;
	}

	__hrtimer_init(&sl->timer, clock_id, mode);
	sl->timer.function = hrtimer_wakeup;
	sl->task = current;
}

/**
 * hrtimer_init_sleeper - initialize sleeper to the given clock
 * @sl:		sleeper to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
			  enum hrtimer_mode mode)
{
	debug_init(&sl->timer, clock_id, mode);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
	switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
	case TT_COMPAT:
		if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
			return -EFAULT;
		break;
#endif
	case TT_NATIVE:
		if (put_timespec64(ts, restart->nanosleep.rmtp))
			return -EFAULT;
		break;
	default:
		BUG();
	}
	return -ERESTART_RESTARTBLOCK;
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	struct restart_block *restart;

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_sleeper_start_expires(t, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	if (!t->task)
		return 0;

	restart = &current->restart_block;
	if (restart->nanosleep.type != TT_NONE) {
		ktime_t rem = hrtimer_expires_remaining(&t->timer);
		struct timespec64 rmt;

		if (rem <= 0)
			return 0;
		rmt = ktime_to_timespec64(rem);

		return nanosleep_copyout(restart, &rmt);
	}
	return -ERESTART_RESTARTBLOCK;
}

static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	int ret;

	hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
				      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
	ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
		       const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	u64 slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_sleeper_on_stack(&t, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
	ret = do_nanosleep(&t, mode);
	if (ret != -ERESTART_RESTARTBLOCK)
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	restart = &current->restart_block;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
	set_restart_fn(restart, hrtimer_nanosleep_restart);
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

#ifdef CONFIG_64BIT

SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	struct timespec64 tu;

	if (get_timespec64(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}

#endif

#ifdef CONFIG_COMPAT_32BIT_TIME

SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	struct timespec64 tu;

	if (get_old_timespec32(&tu, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&tu))
		return -EINVAL;

	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;
	return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
				 CLOCK_MONOTONIC);
}
#endif

/*
 * Functions related to boot-time initialization:
 */
int hrtimers_prepare_cpu(unsigned int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];

		clock_b->cpu_base = cpu_base;
		seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
		timerqueue_init_head(&clock_b->active);
	}

	cpu_base->cpu = cpu;
	cpu_base->active_bases = 0;
	cpu_base->hres_active = 0;
	cpu_base->hang_detected = 0;
	cpu_base->next_timer = NULL;
	cpu_base->softirq_next_timer = NULL;
	cpu_base->expires_next = KTIME_MAX;
	cpu_base->softirq_expires_next = KTIME_MAX;
	hrtimer_cpu_base_init_expiry_lock(cpu_base);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
}

int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	/*
	 * this BH disable ensures that raise_softirq_irqoff() does
	 * not wakeup ksoftirqd (and acquire the pi-lock) while
	 * holding the cpu_base lock
	 */
	local_bh_disable();
	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	/*
	 * The migration might have changed the first expiring softirq
	 * timer on this CPU. Update it.
	 */
	hrtimer_update_softirq_timer(new_base, false);

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	local_bh_enable();
	return 0;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 * @clock_id:	timer clock to be used
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
			       const enum hrtimer_mode mode, clockid_t clock_id)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && *expires == 0) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		return -EINTR;
	}

	hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
	hrtimer_sleeper_start_expires(&t, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel give the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, but no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
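
/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * caller must set the task state before calling, e.g. an interruptible
 * wait of roughly 1ms with 100us of wakeup slack:
 *
 *	ktime_t to = ktime_add_us(ktime_get(), 1000);	// hypothetical
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	schedule_hrtimeout_range(&to, 100 * NSEC_PER_USEC,
 *				 HRTIMER_MODE_ABS);
 */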

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns unless the current task is explicitly
 * woken up, (e.g. by wake_up_process()).
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task or the current task is explicitly woken
 * up.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);