1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/cpu.h>
26#include <linux/export.h>
27#include <linux/percpu.h>
28#include <linux/hrtimer.h>
29#include <linux/notifier.h>
30#include <linux/syscalls.h>
31#include <linux/interrupt.h>
32#include <linux/tick.h>
33#include <linux/err.h>
34#include <linux/debugobjects.h>
35#include <linux/sched/signal.h>
36#include <linux/sched/sysctl.h>
37#include <linux/sched/rt.h>
38#include <linux/sched/deadline.h>
39#include <linux/sched/nohz.h>
40#include <linux/sched/debug.h>
41#include <linux/timer.h>
42#include <linux/freezer.h>
43#include <linux/compat.h>
44
45#include <linux/uaccess.h>
46
47#include <trace/events/timer.h>
48
49#include "tick-internal.h"
50
51
52
53
54
/*
 * Masks for selecting the hard and soft hrtimer clock bases in
 * cpu_base->active_bases.  The soft bases directly follow the hard
 * bases in the clock_base[] array, so the soft mask is the hard mask
 * shifted up by the number of hard bases.
 */
#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
59
60
61
62
63
64
65
66
67
/*
 * Per-CPU timer base state: one clock base per supported clockid for
 * timers expiring in hard interrupt context, followed by one per
 * clockid for timers expiring from the hrtimer softirq.  The lock
 * serializes all operations on the bases of this CPU.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		/* The soft bases mirror the hard ones above */
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};
115
/*
 * Translation table from clockid to hrtimer clock base index, used by
 * hrtimer_clockid_to_base().
 */
static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure unsupported clockids map to an invalid base */
	[0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI] = HRTIMER_BASE_TAI,
};
125
126
127
128
129
130#ifdef CONFIG_SMP
131
132
133
134
135
136
/*
 * Pseudo cpu base onto which timer->base is pointed while the timer is
 * being migrated between CPUs in switch_hrtimer_base().  Concurrent
 * lockers spin in lock_hrtimer_base() until the migration is done.
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { {
		.cpu_base = &migration_cpu_base,
		.seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
						&migration_cpu_base.lock),
	}, },
};

#define migration_base migration_cpu_base.clock_base[0]
146
147static inline bool is_migration_base(struct hrtimer_clock_base *base)
148{
149 return base == &migration_base;
150}
151
152
153
154
155
156
157
158
159
160
161
162
163
/*
 * Lock the cpu base which a timer currently belongs to and make sure
 * the timer did not migrate away while we were acquiring the lock.
 *
 * When the timer's base is the migration pseudo base (a concurrent
 * migration is in flight) we spin until the timer points to a real
 * base again; likewise when the base changed under us between the
 * READ_ONCE() and taking the lock.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}
182
183
184
185
186
187
188
189
190
191
192static int
193hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
194{
195 ktime_t expires;
196
197 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
198 return expires < new_base->cpu_base->expires_next;
199}
200
/*
 * Select the cpu base onto which a timer should be queued.  With NOHZ
 * timer migration enabled and the timer not pinned, prefer the
 * power-optimized target CPU; otherwise stay on @base.
 */
static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}
211
212
213
214
215
216
217
218
219
220
221
222
223
/*
 * Switch the timer base to the target CPU selected by
 * get_target_base().  Called with the current base's cpu_base->lock
 * held; returns with the (possibly different) new base's lock held.
 *
 * While the timer is temporarily detached, timer->base points to the
 * migration pseudo base so that concurrent lock_hrtimer_base() callers
 * spin instead of acquiring a stale lock.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		WRITE_ONCE(timer->base, &migration_base);
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			/* Timer would expire before the remote CPU's next event:
			 * retry with the local CPU instead. */
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			WRITE_ONCE(timer->base, base);
			goto again;
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}
273
274#else
275
/* UP build: there is no migration pseudo base */
static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return false;
}
280
/*
 * UP build: timers never migrate, so the base cannot change under us.
 * Just lock it.
 */
static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

/* UP build: base switching is a no-op */
# define switch_hrtimer_base(t, b, p)	(b)
292
293#endif
294
295
296
297
298
299#if BITS_PER_LONG < 64
300
301
302
303s64 __ktime_divns(const ktime_t kt, s64 div)
304{
305 int sft = 0;
306 s64 dclc;
307 u64 tmp;
308
309 dclc = ktime_to_ns(kt);
310 tmp = dclc < 0 ? -dclc : dclc;
311
312
313 while (div >> 32) {
314 sft++;
315 div >>= 1;
316 }
317 tmp >>= sft;
318 do_div(tmp, (u32) div);
319 return dclc < 0 ? -tmp : tmp;
320}
321EXPORT_SYMBOL_GPL(__ktime_divns);
322#endif
323
324
325
326
/*
 * Add two ktime values and clamp the result to KTIME_SEC_MAX on
 * overflow instead of wrapping around.
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec - this is a saturation
	 * value which is guaranteed to be "far in the future".
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
342
343#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
344
345static const struct debug_obj_descr hrtimer_debug_descr;
346
347static void *hrtimer_debug_hint(void *addr)
348{
349 return ((struct hrtimer *) addr)->function;
350}
351
352
353
354
355
/*
 * fixup_init is called when:
 * - an active object is initialized
 * Cancel the still-active timer, then let debugobjects re-init it.
 */
static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}
369
370
371
372
373
374
/*
 * fixup_activate is called when:
 * - an active object is activated
 * Activating an already active timer is a bug; warn and let
 * debugobjects report it.
 */
static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		fallthrough;
	default:
		return false;
	}
}
385
386
387
388
389
/*
 * fixup_free is called when:
 * - an active object is freed
 * Cancel the timer before the memory goes away.
 */
static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return true;
	default:
		return false;
	}
}
403
/* debugobjects descriptor for hrtimer objects */
static const struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};
411
/* Track init/activate/deactivate state transitions via debugobjects */
static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}
427
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

/**
 * hrtimer_init_on_stack - initialize a timer which lives on the stack
 * @timer:	pointer to the on-stack timer
 * @clock_id:	the clock to be used
 * @mode:	the timer mode
 *
 * Like hrtimer_init(), but registers the object with debugobjects'
 * on-stack tracking so that leaving the stack frame with the timer
 * still active can be caught.
 */
void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
438
static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode);

/**
 * hrtimer_init_sleeper_on_stack - initialize a sleeper on the stack
 * @sl:		pointer to the on-stack sleeper
 * @clock_id:	the clock to be used
 * @mode:	the timer mode
 *
 * On-stack variant of __hrtimer_init_sleeper() with debugobjects
 * tracking of the embedded timer.
 */
void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
				   clockid_t clock_id, enum hrtimer_mode mode)
{
	debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
	__hrtimer_init_sleeper(sl, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
449
/* Counterpart of hrtimer_init_on_stack(): drop debugobjects tracking */
void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
455
456#else
457
/* !CONFIG_DEBUG_OBJECTS_TIMERS: the debug hooks compile away */
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer,
					  enum hrtimer_mode mode) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
462#endif
463
/* Combine debugobjects tracking with the init tracepoint */
static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}
471
/* Combine debugobjects tracking with the start tracepoint */
static inline void debug_activate(struct hrtimer *timer,
				  enum hrtimer_mode mode)
{
	debug_hrtimer_activate(timer, mode);
	trace_hrtimer_start(timer, mode);
}
478
/* Combine debugobjects tracking with the cancel tracepoint */
static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}
484
/*
 * Return the clock base for the lowest set bit in @active and clear
 * that bit, or NULL when no bits are left.  Iteration helper for
 * for_each_active_base() below.
 */
static struct hrtimer_clock_base *
__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
{
	unsigned int idx;

	if (!*active)
		return NULL;

	idx = __ffs(*active);
	*active &= ~(1U << idx);

	return &cpu_base->clock_base[idx];
}

/* Iterate over the clock bases whose bits are set in @active */
#define for_each_active_base(base, cpu_base, active)	\
	while ((base = __next_base((cpu_base), &(active))))
501
/*
 * Scan the first-expiring timer of each base in @active and return the
 * earliest expiry, clamped below by @expires_next.  @exclude, if
 * non-NULL, is skipped (used by hrtimer_next_event_without()); in that
 * mode the cpu_base next-timer cache pointers are left untouched.
 */
static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
					 const struct hrtimer *exclude,
					 unsigned int active,
					 ktime_t expires_next)
{
	struct hrtimer_clock_base *base;
	ktime_t expires;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		timer = container_of(next, struct hrtimer, node);
		if (timer == exclude) {
			/* Get to the next timer in the queue. */
			next = timerqueue_iterate_next(next);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
		}
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires < expires_next) {
			expires_next = expires;

			/* Skip cache update on expiry scan with exclusion */
			if (exclude)
				continue;

			if (timer->is_soft)
				cpu_base->softirq_next_timer = timer;
			else
				cpu_base->next_timer = timer;
		}
	}

	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
	if (expires_next < 0)
		expires_next = 0;
	return expires_next;
}
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
/*
 * Find the next expiring timer on this cpu base, restricted to the
 * bases in @active_mask.  As a side effect, updates the
 * cpu_base->next_timer / softirq_next_timer cache pointers.
 *
 * Soft bases are only scanned while the hrtimer softirq is not
 * pending, because once it has been raised the already evaluated soft
 * expiry must not be overwritten.
 */
static ktime_t
__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
{
	unsigned int active;
	struct hrtimer *next_timer = NULL;
	ktime_t expires_next = KTIME_MAX;

	if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
		cpu_base->softirq_next_timer = NULL;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL,
							 active, KTIME_MAX);

		next_timer = cpu_base->softirq_next_timer;
	}

	if (active_mask & HRTIMER_ACTIVE_HARD) {
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		/* Seed with the soft winner so the cache ends up correct */
		cpu_base->next_timer = next_timer;
		expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
							 expires_next);
	}

	return expires_next;
}
590
/*
 * Update the realtime/boottime/TAI offsets of this cpu base from
 * timekeeping and mirror them into the corresponding soft bases.
 * Returns the current monotonic time.
 */
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
						   offs_real, offs_boot, offs_tai);

	base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
	base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
	base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

	return now;
}
606
607
608
609
610static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
611{
612 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
613 cpu_base->hres_active : 0;
614}
615
/* Is the high resolution mode active on the current CPU? */
static inline int hrtimer_hres_active(void)
{
	return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
}
620
621
622
623
624
625
/*
 * Reprogram the event source after a full re-evaluation of all queued
 * timers.  Called with cpu_base->lock held, e.g. after a timer removal
 * invalidated the cached next expiry.
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	/*
	 * Find the current next expiration time over all bases.
	 */
	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
		/*
		 * The overall next event is a soft timer. If the softirq
		 * is already running, its expiry bookkeeping must not be
		 * touched; restrict the hardware deadline to the hard
		 * timers. Otherwise record the soft expiry so the softirq
		 * gets raised in time.
		 */
		if (cpu_base->softirq_activated)
			expires_next = __hrtimer_get_next_event(cpu_base,
								HRTIMER_ACTIVE_HARD);
		else
			cpu_base->softirq_expires_next = expires_next;
	}

	if (skip_equal && expires_next == cpu_base->expires_next)
		return;

	cpu_base->expires_next = expires_next;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected still
	 * being set. So we'd effectively block all timers until the
	 * T2 event fires.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	tick_program_event(cpu_base->expires_next, 1);
}
676
677
678#ifdef CONFIG_HIGH_RES_TIMERS
679
680
681
682
/* High resolution timer enabled? Overridable via "highres=" */
static bool hrtimer_hres_enabled __read_mostly = true;
/* Current timer resolution in nanoseconds; LOW_RES until hres switch */
unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
EXPORT_SYMBOL_GPL(hrtimer_resolution);
686
687
688
689
/*
 * Enable / disable high resolution mode via the "highres=" boot
 * parameter.
 */
static int __init setup_hrtimer_hres(char *str)
{
	return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
}

__setup("highres=", setup_hrtimer_hres);
696
697
698
699
/* Is high resolution mode allowed (not disabled on the command line)? */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}
704
705
706
707
708
709
/*
 * Retrigger the next event on the local CPU: refresh the clock
 * offsets and unconditionally reprogram the event source.  Called
 * via IPI from clock_was_set() and on resume.
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (!__hrtimer_hres_active(base))
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}
722
723
724
725
/*
 * Switch the local CPU to high resolution mode.
 */
static void hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);

	if (tick_init_highres()) {
		pr_warn("Could not switch to high resolution mode on CPU %u\n",
			base->cpu);
		return;
	}
	base->hres_active = 1;
	hrtimer_resolution = HIGH_RES_NSEC;

	/* The periodic tick is now emulated by a per-CPU hrtimer */
	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
}
742
/* Workqueue callback deferring clock_was_set() to process context */
static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);
749
750
751
752
753
/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus, deferred to workqueue context.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}
758
759#else
760
/* !CONFIG_HIGH_RES_TIMERS: the hres machinery compiles away */
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void retrigger_next_event(void *arg) { }
764
765#endif
766
767
768
769
770
771
772
773
/*
 * When a timer is enqueued and expires earlier than the already
 * enqueued timers, we have to check whether it expires earlier than
 * the timer for which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held.
 */
static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	struct hrtimer_clock_base *base = timer->base;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires < 0)
		expires = 0;

	if (timer->is_soft) {
		/*
		 * soft hrtimer could be started on a remote CPU. In this
		 * case softirq_expires_next needs to be updated on the
		 * remote CPU. The soft hrtimer will not expire before the
		 * first expiry of the hrtimer interrupt handling the soft
		 * hrtimer expiry on the remote CPU; as soon as it runs,
		 * hrtimer_update_softirq_timer() will reevaluate.
		 */
		struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;

		/* Softirq already pending: its expiry is being handled */
		if (timer_cpu_base->softirq_activated)
			return;

		/* Not earlier than the cached soft expiry: nothing to do */
		if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
			return;

		timer_cpu_base->softirq_next_timer = timer;
		timer_cpu_base->softirq_expires_next = expires;

		if (!ktime_before(expires, timer_cpu_base->expires_next) ||
		    !reprogram)
			return;
	}

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpu's clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will reevaluate
	 * the clock bases and reprogram the clock event device before
	 * it returns. We do not touch expires_next here, as doing so
	 * would fight with the reevaluation at the end of the interrupt.
	 */
	if (cpu_base->in_hrtirq)
		return;

	if (expires >= cpu_base->expires_next)
		return;

	/* Update the pointer to the next expiring timer */
	cpu_base->next_timer = timer;
	cpu_base->expires_next = expires;

	/*
	 * If hres is not active, hardware does not have to be
	 * programmed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	tick_program_event(expires, 1);
}
855
856
857
858
859
860
861
862
863
864
865
866
/*
 * Clock realtime was set by do_settimeofday() or similar.
 *
 * The absolute CLOCK_REALTIME expiry of armed timers has to be
 * reevaluated against the new wall clock: retrigger the event sources
 * on all CPUs and notify timerfd.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}
875
876
877
878
879
880
881
/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs.  The local one is handled directly;
 * the remote ones via clock_was_set_delayed().
 */
void hrtimers_resume(void)
{
	lockdep_assert_irqs_disabled();
	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}
890
891
892
893
/*
 * Counterpart to lock_hrtimer_base() above.
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	/* Already in the future: nothing to forward */
	if (delta < 0)
		return 0;

	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
		return 0;

	/* Clamp the interval to the timer resolution */
	if (interval < hrtimer_resolution)
		interval = hrtimer_resolution;

	if (unlikely(delta >= interval)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
951
952
953
954
955
956
957
958
959
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base,
			   enum hrtimer_mode mode)
{
	debug_activate(timer, mode);

	base->cpu_base->active_bases |= 1 << base->index;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);

	return timerqueue_add(&base->active, &timer->node);
}
973
974
975
976
977
978
979
980
981
982
983
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when
 * the timer is the one which expires next. The caller can disable
 * this by setting reprogram to zero. This is useful, when the context
 * does a reprogramming anyway (e.g. timer interrupt).
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     u8 newstate, int reprogram)
{
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	u8 state = timer->state;

	/* Pairs with the lockless read in hrtimer_is_queued() */
	WRITE_ONCE(timer->state, newstate);
	if (!(state & HRTIMER_STATE_ENQUEUED))
		return;

	/* Last timer on this base gone: clear the active bit */
	if (!timerqueue_del(&base->active, &timer->node))
		cpu_base->active_bases &= ~(1 << base->index);

	/*
	 * Note: If reprogram is false we do not update
	 * cpu_base->next_timer. This happens when we remove the first
	 * timer on a remote cpu. No harm as we never dereference
	 * cpu_base->next_timer. So the worst thing what can happen is
	 * an superfluous call to hrtimer_force_reprogram() on the
	 * remote cpu later on if the same timer gets started.
	 */
	if (reprogram && timer == cpu_base->next_timer)
		hrtimer_force_reprogram(cpu_base, 1);
}
1010
1011
1012
1013
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
	u8 state = timer->state;

	if (state & HRTIMER_STATE_ENQUEUED) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);

		/*
		 * If the timer is not restarted then the state stays
		 * INACTIVE; on restart it must keep the ENQUEUED bit so a
		 * concurrent hrtimer_active() observes it as active.
		 */
		if (!restart)
			state = HRTIMER_STATE_INACTIVE;

		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}
1041
/*
 * Adjust a relative expiry for CONFIG_TIME_LOW_RES kernels.
 */
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
					    const enum hrtimer_mode mode)
{
#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffie) to prevent short timeouts.
	 */
	timer->is_rel = mode & HRTIMER_MODE_REL;
	if (timer->is_rel)
		tim = ktime_add_safe(tim, hrtimer_resolution);
#endif
	return tim;
}
1057
/*
 * Reevaluate the soft timer queue of @cpu_base and, when @reprogram is
 * set, let hrtimer_reprogram() arm the hardware for the first soft
 * expiry.  Called with cpu_base->lock held.
 */
static void
hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
{
	ktime_t expires;

	/*
	 * Find the soft expiry time; updates
	 * cpu_base->softirq_next_timer as a side effect.
	 */
	expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);

	/*
	 * reprogramming needs to be triggered, even if the next soft
	 * hrtimer expires at the same time as the next hard hrtimer.
	 * But no soft timer pending means nothing to do.
	 */
	if (expires == KTIME_MAX)
		return;

	/*
	 * cpu_base->*next_timer is recomputed by
	 * __hrtimer_get_next_event() which calls
	 * hrtimer_reprogram() indirectly here.
	 */
	hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
}
1082
/*
 * Internal start helper: remove a possibly pending instance of the
 * timer, compute the absolute expiry with slack @delta_ns, select the
 * target base and enqueue.  Returns non-zero when the timer became the
 * first expiring timer of its base (caller must then reprogram).
 *
 * Called with the base lock held via lock_hrtimer_base().
 */
static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
				    u64 delta_ns, const enum hrtimer_mode mode,
				    struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base, true);

	/* Relative expiry is based on the base clock's current time */
	if (mode & HRTIMER_MODE_REL)
		tim = ktime_add_safe(tim, base->get_time());

	tim = hrtimer_update_lowres(timer, tim, mode);

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	return enqueue_hrtimer(timer, new_base, mode);
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			    u64 delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;

	/*
	 * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
	 * match on CONFIG_PREEMPT_RT = n. With PREEMPT_RT check the hard
	 * expiry mode because unmarked timers are moved to softirq expiry.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
	else
		WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);

	base = lock_hrtimer_base(timer, &flags);

	/* Reprogram only when the timer became the first to expire */
	if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
		hrtimer_reprogram(timer, true);

	unlock_hrtimer_base(timer, &flags);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback, nothing to do here.  The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base, false);

	unlock_hrtimer_base(timer, &flags);

	return ret;

}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1176
1177#ifdef CONFIG_PREEMPT_RT
/*
 * PREEMPT_RT: per-cpu-base expiry lock, held around softirq timer
 * expiry so that a canceling task can block on it instead of busy
 * waiting for the running callback (see hrtimer_cancel_wait_running()).
 */
static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
{
	spin_lock_init(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
{
	spin_lock(&base->softirq_expiry_lock);
}

static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
{
	spin_unlock(&base->softirq_expiry_lock);
}
1192
1193
1194
1195
1196
1197
1198
1199
/*
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
 * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
				      unsigned long flags)
{
	if (atomic_read(&cpu_base->timer_waiters)) {
		raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
		spin_unlock(&cpu_base->softirq_expiry_lock);
		spin_lock(&cpu_base->softirq_expiry_lock);
		raw_spin_lock_irq(&cpu_base->lock);
	}
}
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/*
 * This function is called on PREEMPT_RT kernels when the fast path
 * deletion of a timer failed because the timer callback function was
 * running.
 *
 * This prevents priority inversion: if the soft irq thread is preempted
 * in the middle of a timer callback, then calling hrtimer_cancel() can
 * lead to two issues:
 *
 *  - If the caller is on a remote CPU then it has to spin wait for the timer
 *    handler to complete. This can result in unbound priority inversion.
 *
 *  - If the caller originates from the task which preempted the timer
 *    handler on the same CPU, then spin waiting for the timer handler to
 *    complete is never going to end.
 */
void hrtimer_cancel_wait_running(const struct hrtimer *timer)
{
	/* Lockless read. Prevent the compiler from reloading it below */
	struct hrtimer_clock_base *base = READ_ONCE(timer->base);

	/*
	 * Just relax if the timer expires in hard interrupt context or if
	 * it is currently on the migration base.
	 */
	if (!timer->is_soft || is_migration_base(base)) {
		cpu_relax();
		return;
	}

	/*
	 * Mark the base as contended and grab the expiry lock, which is
	 * held by the softirq across the timer callback. Drop the lock
	 * immediately so the softirq can expire the next timer. In theory
	 * the timer could already be running again, but that's more than
	 * unlikely and just causes another wait/unlock cycle.
	 */
	atomic_inc(&base->cpu_base->timer_waiters);
	spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
	atomic_dec(&base->cpu_base->timer_waiters);
	spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
}
1253#else
/* !PREEMPT_RT: the expiry lock machinery compiles away */
static inline void
hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
static inline void
hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
					     unsigned long flags) { }
1262#endif
1263
1264
1265
1266
1267
1268
1269
1270
1271
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	int ret;

	do {
		ret = hrtimer_try_to_cancel(timer);

		if (ret < 0)
			hrtimer_cancel_wait_running(timer);
	} while (ret < 0);
	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
1285
1286
1287
1288
1289
1290
/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
		rem = hrtimer_expires_remaining_adjusted(timer);
	else
		rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
1306
1307#ifdef CONFIG_NO_HZ_COMMON
1308
1309
1310
1311
1312
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 * Only meaningful in low resolution mode: with hres active the tick
 * code does not need this information.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!__hrtimer_hres_active(cpu_base))
		expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
1328
1329
1330
1331
1332
1333
1334
1335
/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:	timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
u64 hrtimer_next_event_without(const struct hrtimer *exclude)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (__hrtimer_hres_active(cpu_base)) {
		unsigned int active;

		/* Soft bases only while the softirq is not pending */
		if (!cpu_base->softirq_activated) {
			active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
			expires = __hrtimer_next_event_base(cpu_base, exclude,
							    active, KTIME_MAX);
		}
		active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
		expires = __hrtimer_next_event_base(cpu_base, exclude, active,
						    expires);
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
1361#endif
1362
1363static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1364{
1365 if (likely(clock_id < MAX_CLOCKS)) {
1366 int base = hrtimer_clock_to_base_table[clock_id];
1367
1368 if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1369 return base;
1370 }
1371 WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1372 return HRTIMER_BASE_MONOTONIC;
1373}
1374
/*
 * Common initialization: clear the timer, pick the clock base matching
 * @clock_id and the soft/hard expiry mode, and init the queue node.
 */
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
	struct hrtimer_cpu_base *cpu_base;
	int base;

	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
		softtimer = true;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = raw_cpu_ptr(&hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they needs to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
		clock_id = CLOCK_MONOTONIC;

	/* The soft bases occupy the second half of the clock_base array */
	base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
	base += hrtimer_clockid_to_base(clock_id);
	timer->is_soft = softtimer;
	timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);
}
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:       The modes which are relevant for initialization:
 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *              HRTIMER_MODE_REL_SOFT
 *
 *              The PINNED variants of the above can be handed in,
 *              but the PINNED bit is ignored as pinning happens
 *              when the hrtimer is started
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
1430
1431
1432
1433
1434
1435
1436
1437
/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 * The seqcount read section spans __run_hrtimer()'s write sections,
 * so the ENQUEUED->running and running->INACTIVE transitions are never
 * observed as a transient "inactive" window; the retry also covers a
 * base change due to migration.
 */
bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}
EXPORT_SYMBOL_GPL(hrtimer_active);
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being ran
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now,
			  unsigned long flags) __must_hold(&cpu_base->lock)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	bool expires_in_hardirq;
	int restart;

	lockdep_assert_held(&cpu_base->lock);

	debug_deactivate(timer);
	base->running = timer;

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
	fn = timer->function;

	/*
	 * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
	 * timer is restarted with a period then it becomes an absolute
	 * timer. If its not restarted it does not matter.
	 */
	if (IS_ENABLED(CONFIG_TIME_LOW_RES))
		timer->is_rel = false;

	/*
	 * The timer is marked as running in the CPU base, so it is
	 * protected against migration to a different CPU even if the lock
	 * is dropped.
	 */
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	trace_hrtimer_expire_entry(timer, now);
	expires_in_hardirq = lockdep_hrtimer_enter(timer);

	restart = fn(timer);

	lockdep_hrtimer_exit(expires_in_hardirq);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock_irq(&cpu_base->lock);

	/*
	 * Note: We clear the running state after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Reprogramming the
	 * event hardware needs to happen on the return from the interrupt
	 * path, with the base lock still held.
	 *
	 * Note: Because we dropped the cpu_base->lock above,
	 * hrtimer_start_range_ns() can have popped in and enqueued the timer
	 * for us already.
	 */
	if (restart != HRTIMER_NORESTART &&
	    !(timer->state & HRTIMER_STATE_ENQUEUED))
		enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);

	/*
	 * Separate the ->running assignment from the ->state assignment.
	 *
	 * As with a regular write barrier, this ensures the read side in
	 * hrtimer_active() cannot observe base->running.timer == NULL &&
	 * timer->state == INACTIVE.
	 */
	raw_write_seqcount_barrier(&base->seq);

	WARN_ON_ONCE(base->running != timer);
	base->running = NULL;
}
1550
/*
 * Expire every timer in the bases selected by @active_mask whose soft
 * expiry is at or before @now (plus the base offset).  Called with
 * cpu_base->lock held and interrupts disabled; the lock is dropped
 * around each callback by __run_hrtimer().
 */
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
				 unsigned long flags, unsigned int active_mask)
{
	struct hrtimer_clock_base *base;
	unsigned int active = cpu_base->active_bases & active_mask;

	for_each_active_base(base, cpu_base, active) {
		struct timerqueue_node *node;
		ktime_t basenow;

		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
			if (active_mask == HRTIMER_ACTIVE_SOFT)
				hrtimer_sync_wait_running(cpu_base, flags);
		}
	}
}
1589
/*
 * Softirq handler (HRTIMER_SOFTIRQ): expire the soft bases, then
 * reevaluate and rearm the first soft expiry.
 */
static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	/* PREEMPT_RT: allow cancelers to wait for the expiry to finish */
	hrtimer_cpu_base_lock_expiry(cpu_base);
	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);

	cpu_base->softirq_activated = 0;
	hrtimer_update_softirq_timer(cpu_base, true);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
	hrtimer_cpu_base_unlock_expiry(cpu_base);
}
1608
1609#ifdef CONFIG_HIGH_RES_TIMERS
1610
1611
1612
1613
1614
/*
 * High resolution timer interrupt.
 * Called with interrupts disabled.
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	unsigned long flags;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event = KTIME_MAX;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next = KTIME_MAX;

	/* Soft timers due? Raise the softirq and block soft rescans */
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

	/* Reevaluate the clock bases for the [soft] next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	/* Reprogramming necessary ? */
	if (!tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	delta = ktime_sub(now, entry_time);
	if ((unsigned int)delta > cpu_base->max_hang_time)
		cpu_base->max_hang_time = (unsigned int) delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
}
1705
1706
1707static inline void __hrtimer_peek_ahead_timers(void)
1708{
1709 struct tick_device *td;
1710
1711 if (!hrtimer_hres_active())
1712 return;
1713
1714 td = this_cpu_ptr(&tick_cpu_device);
1715 if (td && td->evtdev)
1716 hrtimer_interrupt(td->evtdev);
1717}
1718
1719#else
1720
1721static inline void __hrtimer_peek_ahead_timers(void) { }
1722
1723#endif
1724
1725
1726
1727
/*
 * Called from run_local_timers() in hardirq context on every tick when
 * the system is NOT in high resolution mode: expires due hard timers
 * and raises the softirq for due soft timers.
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	unsigned long flags;
	ktime_t now;

	/* In high-res mode hrtimer_interrupt() does this work instead */
	if (__hrtimer_hres_active(cpu_base))
		return;

	/*
	 * A clockevent device switched to oneshot mode: switch hrtimers
	 * to high resolution (or NOHZ) operation and bail out; the next
	 * expiry will be handled on the new path.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);
	now = hrtimer_update_base(cpu_base);

	/* Soft timers due: defer them to the HRTIMER softirq */
	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
		cpu_base->softirq_expires_next = KTIME_MAX;
		cpu_base->softirq_activated = 1;
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
	}

	__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
1761
1762
1763
1764
1765static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1766{
1767 struct hrtimer_sleeper *t =
1768 container_of(timer, struct hrtimer_sleeper, timer);
1769 struct task_struct *task = t->task;
1770
1771 t->task = NULL;
1772 if (task)
1773 wake_up_process(task);
1774
1775 return HRTIMER_NORESTART;
1776}
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1787 enum hrtimer_mode mode)
1788{
1789
1790
1791
1792
1793
1794
1795
1796 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1797 mode |= HRTIMER_MODE_HARD;
1798
1799 hrtimer_start_expires(&sl->timer, mode);
1800}
1801EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1802
1803static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1804 clockid_t clock_id, enum hrtimer_mode mode)
1805{
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1826 if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1827 mode |= HRTIMER_MODE_HARD;
1828 }
1829
1830 __hrtimer_init(&sl->timer, clock_id, mode);
1831 sl->timer.function = hrtimer_wakeup;
1832 sl->task = current;
1833}
1834
1835
1836
1837
1838
1839
1840
1841void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1842 enum hrtimer_mode mode)
1843{
1844 debug_init(&sl->timer, clock_id, mode);
1845 __hrtimer_init_sleeper(sl, clock_id, mode);
1846
1847}
1848EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1849
1850int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1851{
1852 switch(restart->nanosleep.type) {
1853#ifdef CONFIG_COMPAT_32BIT_TIME
1854 case TT_COMPAT:
1855 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
1856 return -EFAULT;
1857 break;
1858#endif
1859 case TT_NATIVE:
1860 if (put_timespec64(ts, restart->nanosleep.rmtp))
1861 return -EFAULT;
1862 break;
1863 default:
1864 BUG();
1865 }
1866 return -ERESTART_RESTARTBLOCK;
1867}
1868
1869static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1870{
1871 struct restart_block *restart;
1872
1873 do {
1874 set_current_state(TASK_INTERRUPTIBLE);
1875 hrtimer_sleeper_start_expires(t, mode);
1876
1877 if (likely(t->task))
1878 freezable_schedule();
1879
1880 hrtimer_cancel(&t->timer);
1881 mode = HRTIMER_MODE_ABS;
1882
1883 } while (t->task && !signal_pending(current));
1884
1885 __set_current_state(TASK_RUNNING);
1886
1887 if (!t->task)
1888 return 0;
1889
1890 restart = ¤t->restart_block;
1891 if (restart->nanosleep.type != TT_NONE) {
1892 ktime_t rem = hrtimer_expires_remaining(&t->timer);
1893 struct timespec64 rmt;
1894
1895 if (rem <= 0)
1896 return 0;
1897 rmt = ktime_to_timespec64(rem);
1898
1899 return nanosleep_copyout(restart, &rmt);
1900 }
1901 return -ERESTART_RESTARTBLOCK;
1902}
1903
1904static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1905{
1906 struct hrtimer_sleeper t;
1907 int ret;
1908
1909 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1910 HRTIMER_MODE_ABS);
1911 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1912 ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1913 destroy_hrtimer_on_stack(&t.timer);
1914 return ret;
1915}
1916
1917long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
1918 const clockid_t clockid)
1919{
1920 struct restart_block *restart;
1921 struct hrtimer_sleeper t;
1922 int ret = 0;
1923 u64 slack;
1924
1925 slack = current->timer_slack_ns;
1926 if (dl_task(current) || rt_task(current))
1927 slack = 0;
1928
1929 hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1930 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
1931 ret = do_nanosleep(&t, mode);
1932 if (ret != -ERESTART_RESTARTBLOCK)
1933 goto out;
1934
1935
1936 if (mode == HRTIMER_MODE_ABS) {
1937 ret = -ERESTARTNOHAND;
1938 goto out;
1939 }
1940
1941 restart = ¤t->restart_block;
1942 restart->fn = hrtimer_nanosleep_restart;
1943 restart->nanosleep.clockid = t.timer.base->clockid;
1944 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1945out:
1946 destroy_hrtimer_on_stack(&t.timer);
1947 return ret;
1948}
1949
1950#ifdef CONFIG_64BIT
1951
1952SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
1953 struct __kernel_timespec __user *, rmtp)
1954{
1955 struct timespec64 tu;
1956
1957 if (get_timespec64(&tu, rqtp))
1958 return -EFAULT;
1959
1960 if (!timespec64_valid(&tu))
1961 return -EINVAL;
1962
1963 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1964 current->restart_block.nanosleep.rmtp = rmtp;
1965 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1966 CLOCK_MONOTONIC);
1967}
1968
1969#endif
1970
1971#ifdef CONFIG_COMPAT_32BIT_TIME
1972
1973SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
1974 struct old_timespec32 __user *, rmtp)
1975{
1976 struct timespec64 tu;
1977
1978 if (get_old_timespec32(&tu, rqtp))
1979 return -EFAULT;
1980
1981 if (!timespec64_valid(&tu))
1982 return -EINVAL;
1983
1984 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1985 current->restart_block.nanosleep.compat_rmtp = rmtp;
1986 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1987 CLOCK_MONOTONIC);
1988}
1989#endif
1990
1991
1992
1993
1994int hrtimers_prepare_cpu(unsigned int cpu)
1995{
1996 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1997 int i;
1998
1999 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2000 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
2001
2002 clock_b->cpu_base = cpu_base;
2003 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
2004 timerqueue_init_head(&clock_b->active);
2005 }
2006
2007 cpu_base->cpu = cpu;
2008 cpu_base->active_bases = 0;
2009 cpu_base->hres_active = 0;
2010 cpu_base->hang_detected = 0;
2011 cpu_base->next_timer = NULL;
2012 cpu_base->softirq_next_timer = NULL;
2013 cpu_base->expires_next = KTIME_MAX;
2014 cpu_base->softirq_expires_next = KTIME_MAX;
2015 hrtimer_cpu_base_init_expiry_lock(cpu_base);
2016 return 0;
2017}
2018
2019#ifdef CONFIG_HOTPLUG_CPU
2020
/*
 * Move all enqueued timers from a dead CPU's clock base to the
 * corresponding base of this CPU. Called with both base locks held
 * (see hrtimers_dead_cpu()).
 */
static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		/* The source CPU is dead; no callback can be running there */
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Remove with state kept at ENQUEUED (not INACTIVE):
		 * the timer is only moving, and concurrent observers
		 * must keep seeing it as queued. restart=0: no need
		 * to touch the (dead) source base's reprogramming.
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
		timer->base = new_base;

		/*
		 * Enqueue on the destination base. Reprogramming of
		 * the destination CPU's event device is not done here;
		 * the caller handles that after all lists are migrated.
		 */
		enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
	}
}
2050
/*
 * CPU hotplug teardown: take over all hrtimers of the dead CPU @scpu
 * onto the current CPU. Always returns 0.
 */
int hrtimers_dead_cpu(unsigned int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	/* Must only run for an offline source CPU */
	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	/*
	 * Disable bottom halves and interrupts: the migration runs
	 * atomically on this CPU and must not race with local timer
	 * expiry or the hrtimer softirq.
	 */
	local_bh_disable();
	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = this_cpu_ptr(&hrtimer_bases);

	/*
	 * Lock ordering: destination base first, then the dead CPU's
	 * base nested (SINGLE_DEPTH_NESTING keeps lockdep happy about
	 * taking two locks of the same class).
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	/*
	 * Re-evaluate the soft timer state after migration without
	 * raising the softirq here (reprogram=false); interrupts are
	 * still disabled.
	 */
	hrtimer_update_softirq_timer(new_base, false);

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Expire any migrated timers that are already due */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
	local_bh_enable();
	return 0;
}
2095
2096#endif
2097
/*
 * Boot-time initialization: set up the boot CPU's hrtimer base and
 * register the hrtimer softirq handler.
 */
void __init hrtimers_init(void)
{
	hrtimers_prepare_cpu(smp_processor_id());
	open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
}
2103
2104
2105
2106
2107
2108
2109
2110
2111int __sched
2112schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2113 const enum hrtimer_mode mode, clockid_t clock_id)
2114{
2115 struct hrtimer_sleeper t;
2116
2117
2118
2119
2120
2121 if (expires && *expires == 0) {
2122 __set_current_state(TASK_RUNNING);
2123 return 0;
2124 }
2125
2126
2127
2128
2129 if (!expires) {
2130 schedule();
2131 return -EINTR;
2132 }
2133
2134 hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2135 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2136 hrtimer_sleeper_start_expires(&t, mode);
2137
2138 if (likely(t.task))
2139 schedule();
2140
2141 hrtimer_cancel(&t.timer);
2142 destroy_hrtimer_on_stack(&t.timer);
2143
2144 __set_current_state(TASK_RUNNING);
2145
2146 return !t.task ? 0 : -EINTR;
2147}
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
/**
 * schedule_hrtimeout_range - sleep until timeout (CLOCK_MONOTONIC)
 * @expires:	timeout value (ktime_t), NULL means sleep indefinitely
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode (abs/rel)
 *
 * Convenience wrapper around schedule_hrtimeout_range_clock() using
 * CLOCK_MONOTONIC; see that function for the return semantics.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
/**
 * schedule_hrtimeout - sleep until timeout (CLOCK_MONOTONIC, no slack)
 * @expires:	timeout value (ktime_t), NULL means sleep indefinitely
 * @mode:	timer mode (abs/rel)
 *
 * Wrapper around schedule_hrtimeout_range() with zero slack; see
 * schedule_hrtimeout_range_clock() for the return semantics.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
2221