#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard hrtimer bases from
 * cpu_base->active_bases: the four hard (interrupt expiry) bases occupy
 * the low bits, their soft (softirq expiry) counterparts the bits above
 * MASK_SHIFT.
 */
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * The timer bases, one set per CPU.
 *
 * The array index corresponds to the HRTIMER_BASE_* enum: the four hard
 * (interrupt context) bases come first, followed by their soft (softirq
 * expiry) counterparts for the same clocks.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

130#ifdef CONFIG_SMP
131
132
133
134
135
136
137static struct hrtimer_cpu_base migration_cpu_base = {
138 .clock_base = { { .cpu_base = &migration_cpu_base, }, },
139};
140
141#define migration_base migration_cpu_base.clock_base[0]
142
143static inline bool is_migration_base(struct hrtimer_clock_base *base)
144{
145 return base == &migration_base;
146}
147
148
149
150
151
152
153
154
155
156
157
158
159
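/*
 * Lock the per-CPU base that currently owns @timer. timer->base can change
 * concurrently (see switch_hrtimer_base()) and is set to &migration_base
 * while a timer is in flight between CPUs, so retry until the base read
 * before locking is still the owner after the lock has been taken.
 */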
160static
161struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
162 unsigned long *flags)
163{
164 struct hrtimer_clock_base *base;
165
166 for (;;) {
167 base = READ_ONCE(timer->base);
168 if (likely(base != &migration_base)) {
169 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
170 if (likely(base == timer->base))
171 return base;
172
173 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
174 }
175 cpu_relax();
176 }
177}
178
179
180
181
182
183
184
185
186
187
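/*
 * Check whether the timer, if enqueued on @new_base, would expire before
 * the event already programmed on that base's CPU. We cannot reprogram a
 * remote CPU's clock event device, so such a move must be rejected.
 */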
188static int
189hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
190{
191 ktime_t expires;
192
193 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
194 return expires < new_base->cpu_base->expires_next;
195}
196
197static inline
198struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
199 int pinned)
200{
201#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
202 if (static_branch_likely(&timers_migration_enabled) && !pinned)
203 return &per_cpu(hrtimer_bases, get_nohz_timer_target());
204#endif
205 return base;
206}
207
208
209
210
211
212
213
214
215
216
217
218
219
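/*
 * Move the timer to the clock base of the current (or, for non-pinned
 * timers with NOHZ, the preferred) CPU. The move is abandoned when the
 * timer's callback is running on the old CPU, and reverted when the new
 * expiry would require reprogramming a remote CPU (see
 * hrtimer_check_target()).
 */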
220static inline struct hrtimer_clock_base *
221switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
222 int pinned)
223{
224 struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
225 struct hrtimer_clock_base *new_base;
226 int basenum = base->index;
227
228 this_cpu_base = this_cpu_ptr(&hrtimer_bases);
229 new_cpu_base = get_target_base(this_cpu_base, pinned);
230again:
231 new_base = &new_cpu_base->clock_base[basenum];
232
233 if (base != new_base) {
234
235
236
237
238
239
240
241
242
243 if (unlikely(hrtimer_callback_running(timer)))
244 return base;
245
246
247 WRITE_ONCE(timer->base, &migration_base);
248 raw_spin_unlock(&base->cpu_base->lock);
249 raw_spin_lock(&new_base->cpu_base->lock);
250
251 if (new_cpu_base != this_cpu_base &&
252 hrtimer_check_target(timer, new_base)) {
253 raw_spin_unlock(&new_base->cpu_base->lock);
254 raw_spin_lock(&base->cpu_base->lock);
255 new_cpu_base = this_cpu_base;
256 WRITE_ONCE(timer->base, base);
257 goto again;
258 }
259 WRITE_ONCE(timer->base, new_base);
260 } else {
261 if (new_cpu_base != this_cpu_base &&
262 hrtimer_check_target(timer, new_base)) {
263 new_cpu_base = this_cpu_base;
264 goto again;
265 }
266 }
267 return new_base;
268}
269
270#else
271
272static inline bool is_migration_base(struct hrtimer_clock_base *base)
273{
274 return false;
275}
276
277static inline struct hrtimer_clock_base *
278lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
279{
280 struct hrtimer_clock_base *base = timer->base;
281
282 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
283
284 return base;
285}
286
287# define switch_hrtimer_base(t, b, p) (b)
288
289#endif
290
291
292
293
294
295#if BITS_PER_LONG < 64
296
297
298
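/*
 * 32-bit fallback for dividing a ktime_t by an arbitrary divisor: shrink
 * the divisor below 2^32 by shifting (and shift the dividend by the same
 * amount) so that do_div(), which only takes a 32-bit divisor, can be used.
 */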
299s64 __ktime_divns(const ktime_t kt, s64 div)
300{
301 int sft = 0;
302 s64 dclc;
303 u64 tmp;
304
305 dclc = ktime_to_ns(kt);
306 tmp = dclc < 0 ? -dclc : dclc;
307
308
309 while (div >> 32) {
310 sft++;
311 div >>= 1;
312 }
313 tmp >>= sft;
314 do_div(tmp, (u32) div);
315 return dclc < 0 ? -tmp : tmp;
316}
317EXPORT_SYMBOL_GPL(__ktime_divns);
318#endif
319
320
321
322
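/*
 * Add two ktime values and clamp the result to KTIME_SEC_MAX on signed
 * overflow, so that a huge relative timeout saturates instead of wrapping
 * into the past.
 */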
323ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
324{
325 ktime_t res = ktime_add_unsafe(lhs, rhs);
326
327
328
329
330
331 if (res < 0 || res < lhs || res < rhs)
332 res = ktime_set(KTIME_SEC_MAX, 0);
333
334 return res;
335}
336
337EXPORT_SYMBOL_GPL(ktime_add_safe);
338
339#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
340
341static struct debug_obj_descr hrtimer_debug_descr;
342
343static void *hrtimer_debug_hint(void *addr)
344{
345 return ((struct hrtimer *) addr)->function;
346}
347
348
349
350
351
352static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
353{
354 struct hrtimer *timer = addr;
355
356 switch (state) {
357 case ODEBUG_STATE_ACTIVE:
358 hrtimer_cancel(timer);
359 debug_object_init(timer, &hrtimer_debug_descr);
360 return true;
361 default:
362 return false;
363 }
364}
365
366
367
368
369
370
371static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
372{
373 switch (state) {
374 case ODEBUG_STATE_ACTIVE:
375 WARN_ON(1);
		/* fall through */
377 default:
378 return false;
379 }
380}
381
382
383
384
385
386static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
387{
388 struct hrtimer *timer = addr;
389
390 switch (state) {
391 case ODEBUG_STATE_ACTIVE:
392 hrtimer_cancel(timer);
393 debug_object_free(timer, &hrtimer_debug_descr);
394 return true;
395 default:
396 return false;
397 }
398}
399
400static struct debug_obj_descr hrtimer_debug_descr = {
401 .name = "hrtimer",
402 .debug_hint = hrtimer_debug_hint,
403 .fixup_init = hrtimer_fixup_init,
404 .fixup_activate = hrtimer_fixup_activate,
405 .fixup_free = hrtimer_fixup_free,
406};
407
408static inline void debug_hrtimer_init(struct hrtimer *timer)
409{
410 debug_object_init(timer, &hrtimer_debug_descr);
411}
412
413static inline void debug_hrtimer_activate(struct hrtimer *timer,
414 enum hrtimer_mode mode)
415{
416 debug_object_activate(timer, &hrtimer_debug_descr);
417}
418
419static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
420{
421 debug_object_deactivate(timer, &hrtimer_debug_descr);
422}
423
424static inline void debug_hrtimer_free(struct hrtimer *timer)
425{
426 debug_object_free(timer, &hrtimer_debug_descr);
427}
428
429static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
430 enum hrtimer_mode mode);
431
432void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
433 enum hrtimer_mode mode)
434{
435 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
436 __hrtimer_init(timer, clock_id, mode);
437}
438EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
439
440static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
441 clockid_t clock_id, enum hrtimer_mode mode);
442
443void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
444 clockid_t clock_id, enum hrtimer_mode mode)
445{
446 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
447 __hrtimer_init_sleeper(sl, clock_id, mode);
448}
449EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
450
451void destroy_hrtimer_on_stack(struct hrtimer *timer)
452{
453 debug_object_free(timer, &hrtimer_debug_descr);
454}
455EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
456
457#else
458
459static inline void debug_hrtimer_init(struct hrtimer *timer) { }
460static inline void debug_hrtimer_activate(struct hrtimer *timer,
461 enum hrtimer_mode mode) { }
462static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
463#endif
464
465static inline void
466debug_init(struct hrtimer *timer, clockid_t clockid,
467 enum hrtimer_mode mode)
468{
469 debug_hrtimer_init(timer);
470 trace_hrtimer_init(timer, clockid, mode);
471}
472
473static inline void debug_activate(struct hrtimer *timer,
474 enum hrtimer_mode mode)
475{
476 debug_hrtimer_activate(timer, mode);
477 trace_hrtimer_start(timer, mode);
478}
479
480static inline void debug_deactivate(struct hrtimer *timer)
481{
482 debug_hrtimer_deactivate(timer);
483 trace_hrtimer_cancel(timer);
484}
485
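/*
 * Return the clock base for the lowest set bit in *active and clear that
 * bit, allowing for_each_active_base() to walk all currently active bases
 * of a cpu_base in index order.
 */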
486static struct hrtimer_clock_base *
487__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
488{
489 unsigned int idx;
490
491 if (!*active)
492 return NULL;
493
494 idx = __ffs(*active);
495 *active &= ~(1U << idx);
496
497 return &cpu_base->clock_base[idx];
498}
499
500#define for_each_active_base(base, cpu_base, active) \
501 while ((base = __next_base((cpu_base), &(active))))
502
503static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
504 const struct hrtimer *exclude,
505 unsigned int active,
506 ktime_t expires_next)
507{
508 struct hrtimer_clock_base *base;
509 ktime_t expires;
510
511 for_each_active_base(base, cpu_base, active) {
512 struct timerqueue_node *next;
513 struct hrtimer *timer;
514
515 next = timerqueue_getnext(&base->active);
516 timer = container_of(next, struct hrtimer, node);
517 if (timer == exclude) {
518
519 next = timerqueue_iterate_next(next);
520 if (!next)
521 continue;
522
523 timer = container_of(next, struct hrtimer, node);
524 }
525 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
526 if (expires < expires_next) {
527 expires_next = expires;
528
529
530 if (exclude)
531 continue;
532
533 if (timer->is_soft)
534 cpu_base->softirq_next_timer = timer;
535 else
536 cpu_base->next_timer = timer;
537 }
538 }
539
540
541
542
543
544 if (expires_next < 0)
545 expires_next = 0;
546 return expires_next;
547}
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
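/*
 * Find the next expiry among the bases selected by @active_mask. The soft
 * bases are scanned first, so cpu_base->softirq_next_timer is updated, and
 * the hard bases may then move expires_next even earlier. The soft bases
 * are skipped while the hrtimer softirq is already raised, as it will
 * reevaluate them itself.
 */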
566static ktime_t
567__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
568{
569 unsigned int active;
570 struct hrtimer *next_timer = NULL;
571 ktime_t expires_next = KTIME_MAX;
572
573 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
574 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
575 cpu_base->softirq_next_timer = NULL;
576 expires_next = __hrtimer_next_event_base(cpu_base, NULL,
577 active, KTIME_MAX);
578
579 next_timer = cpu_base->softirq_next_timer;
580 }
581
582 if (active_mask & HRTIMER_ACTIVE_HARD) {
583 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
584 cpu_base->next_timer = next_timer;
585 expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
586 expires_next);
587 }
588
589 return expires_next;
590}
591
592static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
593{
594 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
595 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
596 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
597
598 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
599 offs_real, offs_boot, offs_tai);
600
601 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
602 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
603 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
604
605 return now;
606}
607
608
609
610
611static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
612{
613 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
614 cpu_base->hres_active : 0;
615}
616
617static inline int hrtimer_hres_active(void)
618{
619 return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
620}
621
622
623
624
625
626
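/*
 * Recompute cpu_base->expires_next after the first expiring timer has been
 * removed and, in high resolution mode, reprogram the clock event device.
 * If the earliest timer is a soft timer its expiry is also recorded in
 * softirq_expires_next, unless the hrtimer softirq is already pending, in
 * which case only the hard bases are considered for the hardware event.
 */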
627static void
628hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
629{
630 ktime_t expires_next;
631
632
633
634
635 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
636
637 if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
638
639
640
641
642
643 if (cpu_base->softirq_activated)
644 expires_next = __hrtimer_get_next_event(cpu_base,
645 HRTIMER_ACTIVE_HARD);
646 else
647 cpu_base->softirq_expires_next = expires_next;
648 }
649
650 if (skip_equal && expires_next == cpu_base->expires_next)
651 return;
652
653 cpu_base->expires_next = expires_next;
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
673 return;
674
675 tick_program_event(cpu_base->expires_next, 1);
676}
677
678
679#ifdef CONFIG_HIGH_RES_TIMERS
680
681
682
683
684static bool hrtimer_hres_enabled __read_mostly = true;
685unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
686EXPORT_SYMBOL_GPL(hrtimer_resolution);
687
688
689
690
691static int __init setup_hrtimer_hres(char *str)
692{
693 return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
694}
695
696__setup("highres=", setup_hrtimer_hres);
697
698
699
700
701static inline int hrtimer_is_hres_enabled(void)
702{
703 return hrtimer_hres_enabled;
704}
705
706
707
708
709
710
711static void retrigger_next_event(void *arg)
712{
713 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
714
715 if (!__hrtimer_hres_active(base))
716 return;
717
718 raw_spin_lock(&base->lock);
719 hrtimer_update_base(base);
720 hrtimer_force_reprogram(base, 0);
721 raw_spin_unlock(&base->lock);
722}
723
724
725
726
727static void hrtimer_switch_to_hres(void)
728{
729 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
730
731 if (tick_init_highres()) {
732 pr_warn("Could not switch to high resolution mode on CPU %u\n",
733 base->cpu);
734 return;
735 }
736 base->hres_active = 1;
737 hrtimer_resolution = HIGH_RES_NSEC;
738
739 tick_setup_sched_timer();
740
741 retrigger_next_event(NULL);
742}
743
744static void clock_was_set_work(struct work_struct *work)
745{
746 clock_was_set();
747}
748
749static DECLARE_WORK(hrtimer_work, clock_was_set_work);
750
751
752
753
754
755void clock_was_set_delayed(void)
756{
757 schedule_work(&hrtimer_work);
758}
759
760#else
761
762static inline int hrtimer_is_hres_enabled(void) { return 0; }
763static inline void hrtimer_switch_to_hres(void) { }
764static inline void retrigger_next_event(void *arg) { }
765
766#endif
767
768
769
770
771
772
773
774
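/*
 * Called after (re)starting a timer. If the new timer expires before the
 * currently programmed event on this CPU, update expires_next (or, for
 * soft timers, softirq_expires_next) and reprogram the clock event device.
 * Timers queued on a remote base and expiry while inside
 * hrtimer_interrupt() are left to the respective code paths.
 */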
775static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
776{
777 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
778 struct hrtimer_clock_base *base = timer->base;
779 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
780
781 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
782
783
784
785
786
787 if (expires < 0)
788 expires = 0;
789
790 if (timer->is_soft) {
791
792
793
794
795
796
797
798 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
799
800 if (timer_cpu_base->softirq_activated)
801 return;
802
803 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
804 return;
805
806 timer_cpu_base->softirq_next_timer = timer;
807 timer_cpu_base->softirq_expires_next = expires;
808
809 if (!ktime_before(expires, timer_cpu_base->expires_next) ||
810 !reprogram)
811 return;
812 }
813
814
815
816
817
818 if (base->cpu_base != cpu_base)
819 return;
820
821
822
823
824
825
826
827
828 if (cpu_base->in_hrtirq)
829 return;
830
831 if (expires >= cpu_base->expires_next)
832 return;
833
834
835 cpu_base->next_timer = timer;
836 cpu_base->expires_next = expires;
837
838
839
840
841
842
843
844
845
846
847 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
848 return;
849
850
851
852
853
854 tick_program_event(expires, 1);
855}
856
857
858
859
860
861
862
863
864
865
866
867
868void clock_was_set(void)
869{
870#ifdef CONFIG_HIGH_RES_TIMERS
871
872 on_each_cpu(retrigger_next_event, NULL, 1);
873#endif
874 timerfd_clock_was_set();
875}
876
877
878
879
880
881
882
883void hrtimers_resume(void)
884{
885 lockdep_assert_irqs_disabled();
886
887 retrigger_next_event(NULL);
888
889 clock_was_set_delayed();
890}
891
892
893
894
895static inline
896void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
897{
898 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
899}
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
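/*
 * hrtimer_forward - forward the timer expiry in multiples of @interval so
 * that it lies in the future relative to @now, and return the number of
 * overruns. Returns 0 when the timer has not expired yet. Must only be
 * called on timers which are not enqueued, typically from within the
 * timer's own callback.
 *
 * A typical periodic callback (illustrative sketch only, the names
 * my_tick/do_periodic_work are made up):
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *t)
 *	{
 *		do_periodic_work();
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */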
918u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
919{
920 u64 orun = 1;
921 ktime_t delta;
922
923 delta = ktime_sub(now, hrtimer_get_expires(timer));
924
925 if (delta < 0)
926 return 0;
927
928 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
929 return 0;
930
931 if (interval < hrtimer_resolution)
932 interval = hrtimer_resolution;
933
934 if (unlikely(delta >= interval)) {
935 s64 incr = ktime_to_ns(interval);
936
937 orun = ktime_divns(delta, incr);
938 hrtimer_add_expires_ns(timer, incr * orun);
939 if (hrtimer_get_expires_tv64(timer) > now)
940 return orun;
941
942
943
944
945 orun++;
946 }
947 hrtimer_add_expires(timer, interval);
948
949 return orun;
950}
951EXPORT_SYMBOL_GPL(hrtimer_forward);
952
953
954
955
956
957
958
959
960
961static int enqueue_hrtimer(struct hrtimer *timer,
962 struct hrtimer_clock_base *base,
963 enum hrtimer_mode mode)
964{
965 debug_activate(timer, mode);
966
967 base->cpu_base->active_bases |= 1 << base->index;
968
969
970 WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
971
972 return timerqueue_add(&base->active, &timer->node);
973}
974
975
976
977
978
979
980
981
982
983
984
985static void __remove_hrtimer(struct hrtimer *timer,
986 struct hrtimer_clock_base *base,
987 u8 newstate, int reprogram)
988{
989 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
990 u8 state = timer->state;
991
992
993 WRITE_ONCE(timer->state, newstate);
994 if (!(state & HRTIMER_STATE_ENQUEUED))
995 return;
996
997 if (!timerqueue_del(&base->active, &timer->node))
998 cpu_base->active_bases &= ~(1 << base->index);
999
1000
1001
1002
1003
1004
1005
1006
1007
1008 if (reprogram && timer == cpu_base->next_timer)
1009 hrtimer_force_reprogram(cpu_base, 1);
1010}
1011
1012
1013
1014
1015static inline int
1016remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
1017{
1018 u8 state = timer->state;
1019
1020 if (state & HRTIMER_STATE_ENQUEUED) {
1021 int reprogram;
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031 debug_deactivate(timer);
1032 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1033
1034 if (!restart)
1035 state = HRTIMER_STATE_INACTIVE;
1036
1037 __remove_hrtimer(timer, base, state, reprogram);
1038 return 1;
1039 }
1040 return 0;
1041}
1042
1043static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1044 const enum hrtimer_mode mode)
1045{
1046#ifdef CONFIG_TIME_LOW_RES
1047
1048
1049
1050
1051
1052 timer->is_rel = mode & HRTIMER_MODE_REL;
1053 if (timer->is_rel)
1054 tim = ktime_add_safe(tim, hrtimer_resolution);
1055#endif
1056 return tim;
1057}
1058
1059static void
1060hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
1061{
1062 ktime_t expires;
1063
1064
1065
1066
1067 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
1068
1069
1070
1071
1072
1073
1074 if (expires == KTIME_MAX)
1075 return;
1076
1077
1078
1079
1080
1081 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1082}
1083
1084static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1085 u64 delta_ns, const enum hrtimer_mode mode,
1086 struct hrtimer_clock_base *base)
1087{
1088 struct hrtimer_clock_base *new_base;
1089
1090
1091 remove_hrtimer(timer, base, true);
1092
1093 if (mode & HRTIMER_MODE_REL)
1094 tim = ktime_add_safe(tim, base->get_time());
1095
1096 tim = hrtimer_update_lowres(timer, tim, mode);
1097
1098 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1099
1100
1101 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
1102
1103 return enqueue_hrtimer(timer, new_base, mode);
1104}
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
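/*
 * hrtimer_start_range_ns - (re)start a timer so that it expires in the
 * window [@tim, @tim + @delta_ns]. @mode selects absolute or relative
 * expiry and whether the timer is pinned to the current CPU. The softness
 * of the timer (softirq vs. hard interrupt expiry) is a property of the
 * timer itself and must match the mode it was initialized with.
 */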
1115void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1116 u64 delta_ns, const enum hrtimer_mode mode)
1117{
1118 struct hrtimer_clock_base *base;
1119 unsigned long flags;
1120
1121
1122
1123
1124
1125
1126 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1127 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1128 else
1129 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1130
1131 base = lock_hrtimer_base(timer, &flags);
1132
1133 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1134 hrtimer_reprogram(timer, true);
1135
1136 unlock_hrtimer_base(timer, &flags);
1137}
1138EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
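/*
 * hrtimer_try_to_cancel - try to deactivate a timer without waiting.
 *
 * Returns:  0 when the timer was not active
 *           1 when the timer was active and has been removed
 *          -1 when the timer is currently executing its callback and
 *             cannot be stopped
 */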
1151int hrtimer_try_to_cancel(struct hrtimer *timer)
1152{
1153 struct hrtimer_clock_base *base;
1154 unsigned long flags;
1155 int ret = -1;
1156
1157
1158
1159
1160
1161
1162
1163 if (!hrtimer_active(timer))
1164 return 0;
1165
1166 base = lock_hrtimer_base(timer, &flags);
1167
1168 if (!hrtimer_callback_running(timer))
1169 ret = remove_hrtimer(timer, base, false);
1170
1171 unlock_hrtimer_base(timer, &flags);
1172
1173 return ret;
1174
1175}
1176EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1177
1178#ifdef CONFIG_PREEMPT_RT
1179static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
1180{
1181 spin_lock_init(&base->softirq_expiry_lock);
1182}
1183
1184static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
1185{
1186 spin_lock(&base->softirq_expiry_lock);
1187}
1188
1189static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
1190{
1191 spin_unlock(&base->softirq_expiry_lock);
1192}
1193
1194
1195
1196
1197
1198
1199
1200
1201static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
1202 unsigned long flags)
1203{
1204 if (atomic_read(&cpu_base->timer_waiters)) {
1205 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1206 spin_unlock(&cpu_base->softirq_expiry_lock);
1207 spin_lock(&cpu_base->softirq_expiry_lock);
1208 raw_spin_lock_irq(&cpu_base->lock);
1209 }
1210}
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1229{
1230
1231 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1232
1233
1234
1235
1236
1237 if (!timer->is_soft || is_migration_base(base)) {
1238 cpu_relax();
1239 return;
1240 }
1241
1242
1243
1244
1245
1246
1247
1248
1249 atomic_inc(&base->cpu_base->timer_waiters);
1250 spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1251 atomic_dec(&base->cpu_base->timer_waiters);
1252 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1253}
1254#else
1255static inline void
1256hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
1257static inline void
1258hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
1259static inline void
1260hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
1261static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
1262 unsigned long flags) { }
1263#endif
1264
1265
1266
1267
1268
1269
1270
1271
1272
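/*
 * hrtimer_cancel - cancel a timer and wait for a running callback to
 * finish. Loops over hrtimer_try_to_cancel(); on PREEMPT_RT the wait for a
 * running soft timer callback blocks on the expiry lock instead of
 * spinning.
 *
 * Returns: 0 when the timer was not active, 1 when it was.
 */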
1273int hrtimer_cancel(struct hrtimer *timer)
1274{
1275 int ret;
1276
1277 do {
1278 ret = hrtimer_try_to_cancel(timer);
1279
1280 if (ret < 0)
1281 hrtimer_cancel_wait_running(timer);
1282 } while (ret < 0);
1283 return ret;
1284}
1285EXPORT_SYMBOL_GPL(hrtimer_cancel);
1286
1287
1288
1289
1290
1291
1292ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1293{
1294 unsigned long flags;
1295 ktime_t rem;
1296
1297 lock_hrtimer_base(timer, &flags);
1298 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1299 rem = hrtimer_expires_remaining_adjusted(timer);
1300 else
1301 rem = hrtimer_expires_remaining(timer);
1302 unlock_hrtimer_base(timer, &flags);
1303
1304 return rem;
1305}
1306EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
1307
1308#ifdef CONFIG_NO_HZ_COMMON
1309
1310
1311
1312
1313
1314u64 hrtimer_get_next_event(void)
1315{
1316 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1317 u64 expires = KTIME_MAX;
1318 unsigned long flags;
1319
1320 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1321
1322 if (!__hrtimer_hres_active(cpu_base))
1323 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1324
1325 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1326
1327 return expires;
1328}
1329
1330
1331
1332
1333
1334
1335
1336
1337u64 hrtimer_next_event_without(const struct hrtimer *exclude)
1338{
1339 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1340 u64 expires = KTIME_MAX;
1341 unsigned long flags;
1342
1343 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1344
1345 if (__hrtimer_hres_active(cpu_base)) {
1346 unsigned int active;
1347
1348 if (!cpu_base->softirq_activated) {
1349 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1350 expires = __hrtimer_next_event_base(cpu_base, exclude,
1351 active, KTIME_MAX);
1352 }
1353 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1354 expires = __hrtimer_next_event_base(cpu_base, exclude, active,
1355 expires);
1356 }
1357
1358 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1359
1360 return expires;
1361}
1362#endif
1363
1364static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1365{
1366 if (likely(clock_id < MAX_CLOCKS)) {
1367 int base = hrtimer_clock_to_base_table[clock_id];
1368
1369 if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1370 return base;
1371 }
1372 WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1373 return HRTIMER_BASE_MONOTONIC;
1374}
1375
1376static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1377 enum hrtimer_mode mode)
1378{
1379 bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
1380 struct hrtimer_cpu_base *cpu_base;
1381 int base;
1382
1383
1384
1385
1386
1387
1388
1389 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
1390 softtimer = true;
1391
1392 memset(timer, 0, sizeof(struct hrtimer));
1393
1394 cpu_base = raw_cpu_ptr(&hrtimer_bases);
1395
1396
1397
1398
1399
1400
1401 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1402 clock_id = CLOCK_MONOTONIC;
1403
1404 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
1405 base += hrtimer_clockid_to_base(clock_id);
1406 timer->is_soft = softtimer;
1407 timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1408 timer->base = &cpu_base->clock_base[base];
1409 timerqueue_init(&timer->node);
1410}
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
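/*
 * hrtimer_init - initialize a timer to the given clock and mode. Relative
 * CLOCK_REALTIME timers are silently switched to CLOCK_MONOTONIC, as a
 * relative timeout must not be affected by clock_settime().
 *
 * Minimal driver-style usage (illustrative sketch only, my_timer and
 * my_tick are made-up names):
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_tick;
 *	hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */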
1424void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1425 enum hrtimer_mode mode)
1426{
1427 debug_init(timer, clock_id, mode);
1428 __hrtimer_init(timer, clock_id, mode);
1429}
1430EXPORT_SYMBOL_GPL(hrtimer_init);
1431
1432
1433
1434
1435
1436
1437
1438
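/*
 * hrtimer_active - lockless check whether a timer is queued or its
 * callback is running. The seqcount in the clock base is written around
 * the callback (see __run_hrtimer()), so the state and base reads here are
 * retried until a consistent snapshot is observed.
 */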
1439bool hrtimer_active(const struct hrtimer *timer)
1440{
1441 struct hrtimer_clock_base *base;
1442 unsigned int seq;
1443
1444 do {
1445 base = READ_ONCE(timer->base);
1446 seq = raw_read_seqcount_begin(&base->seq);
1447
1448 if (timer->state != HRTIMER_STATE_INACTIVE ||
1449 base->running == timer)
1450 return true;
1451
1452 } while (read_seqcount_retry(&base->seq, seq) ||
1453 base != READ_ONCE(timer->base));
1454
1455 return false;
1456}
1457EXPORT_SYMBOL_GPL(hrtimer_active);
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
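/*
 * Expire one timer: mark it as the running timer of its base, remove it
 * from the queue, then invoke the callback with cpu_base->lock dropped.
 * The seqcount barriers around these state changes pair with
 * hrtimer_active(). A callback returning HRTIMER_RESTART is requeued here,
 * unless it already restarted the timer itself.
 */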
1477static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1478 struct hrtimer_clock_base *base,
1479 struct hrtimer *timer, ktime_t *now,
1480 unsigned long flags) __must_hold(&cpu_base->lock)
1481{
1482 enum hrtimer_restart (*fn)(struct hrtimer *);
1483 bool expires_in_hardirq;
1484 int restart;
1485
1486 lockdep_assert_held(&cpu_base->lock);
1487
1488 debug_deactivate(timer);
1489 base->running = timer;
1490
1491
1492
1493
1494
1495
1496
1497
1498 raw_write_seqcount_barrier(&base->seq);
1499
1500 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1501 fn = timer->function;
1502
1503
1504
1505
1506
1507
1508 if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1509 timer->is_rel = false;
1510
1511
1512
1513
1514
1515
1516 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1517 trace_hrtimer_expire_entry(timer, now);
1518 expires_in_hardirq = lockdep_hrtimer_enter(timer);
1519
1520 restart = fn(timer);
1521
1522 lockdep_hrtimer_exit(expires_in_hardirq);
1523 trace_hrtimer_expire_exit(timer);
1524 raw_spin_lock_irq(&cpu_base->lock);
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535 if (restart != HRTIMER_NORESTART &&
1536 !(timer->state & HRTIMER_STATE_ENQUEUED))
1537 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1538
1539
1540
1541
1542
1543
1544
1545
1546 raw_write_seqcount_barrier(&base->seq);
1547
1548 WARN_ON_ONCE(base->running != timer);
1549 base->running = NULL;
1550}
1551
1552static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1553 unsigned long flags, unsigned int active_mask)
1554{
1555 struct hrtimer_clock_base *base;
1556 unsigned int active = cpu_base->active_bases & active_mask;
1557
1558 for_each_active_base(base, cpu_base, active) {
1559 struct timerqueue_node *node;
1560 ktime_t basenow;
1561
1562 basenow = ktime_add(now, base->offset);
1563
1564 while ((node = timerqueue_getnext(&base->active))) {
1565 struct hrtimer *timer;
1566
1567 timer = container_of(node, struct hrtimer, node);
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581 if (basenow < hrtimer_get_softexpires_tv64(timer))
1582 break;
1583
1584 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1585 if (active_mask == HRTIMER_ACTIVE_SOFT)
1586 hrtimer_sync_wait_running(cpu_base, flags);
1587 }
1588 }
1589}
1590
1591static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1592{
1593 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1594 unsigned long flags;
1595 ktime_t now;
1596
1597 hrtimer_cpu_base_lock_expiry(cpu_base);
1598 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1599
1600 now = hrtimer_update_base(cpu_base);
1601 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
1602
1603 cpu_base->softirq_activated = 0;
1604 hrtimer_update_softirq_timer(cpu_base, true);
1605
1606 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1607 hrtimer_cpu_base_unlock_expiry(cpu_base);
1608}
1609
1610#ifdef CONFIG_HIGH_RES_TIMERS
1611
1612
1613
1614
1615
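/*
 * High resolution timer interrupt: expire all hard bases whose timers are
 * due, raise HRTIMER_SOFTIRQ when soft timers are due, then program the
 * next event. If newly expired timers keep the programmed event in the
 * past, expiry is retried up to three times before hang mitigation kicks
 * in and the next event is deferred by the observed processing time,
 * capped at 100ms.
 */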
1616void hrtimer_interrupt(struct clock_event_device *dev)
1617{
1618 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1619 ktime_t expires_next, now, entry_time, delta;
1620 unsigned long flags;
1621 int retries = 0;
1622
1623 BUG_ON(!cpu_base->hres_active);
1624 cpu_base->nr_events++;
1625 dev->next_event = KTIME_MAX;
1626
1627 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1628 entry_time = now = hrtimer_update_base(cpu_base);
1629retry:
1630 cpu_base->in_hrtirq = 1;
1631
1632
1633
1634
1635
1636
1637
1638 cpu_base->expires_next = KTIME_MAX;
1639
1640 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1641 cpu_base->softirq_expires_next = KTIME_MAX;
1642 cpu_base->softirq_activated = 1;
1643 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1644 }
1645
1646 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1647
1648
1649 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1650
1651
1652
1653
1654 cpu_base->expires_next = expires_next;
1655 cpu_base->in_hrtirq = 0;
1656 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1657
1658
1659 if (!tick_program_event(expires_next, 0)) {
1660 cpu_base->hang_detected = 0;
1661 return;
1662 }
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1678 now = hrtimer_update_base(cpu_base);
1679 cpu_base->nr_retries++;
1680 if (++retries < 3)
1681 goto retry;
1682
1683
1684
1685
1686
1687
1688 cpu_base->nr_hangs++;
1689 cpu_base->hang_detected = 1;
1690 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1691
1692 delta = ktime_sub(now, entry_time);
1693 if ((unsigned int)delta > cpu_base->max_hang_time)
1694 cpu_base->max_hang_time = (unsigned int) delta;
1695
1696
1697
1698
1699 if (delta > 100 * NSEC_PER_MSEC)
1700 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1701 else
1702 expires_next = ktime_add(now, delta);
1703 tick_program_event(expires_next, 1);
1704 pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
1705}
1706
1707
1708static inline void __hrtimer_peek_ahead_timers(void)
1709{
1710 struct tick_device *td;
1711
1712 if (!hrtimer_hres_active())
1713 return;
1714
1715 td = this_cpu_ptr(&tick_cpu_device);
1716 if (td && td->evtdev)
1717 hrtimer_interrupt(td->evtdev);
1718}
1719
1720#else
1721
1722static inline void __hrtimer_peek_ahead_timers(void) { }
1723
1724#endif
1725
1726
1727
1728
1729void hrtimer_run_queues(void)
1730{
1731 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1732 unsigned long flags;
1733 ktime_t now;
1734
1735 if (__hrtimer_hres_active(cpu_base))
1736 return;
1737
1738
1739
1740
1741
1742
1743
1744
1745 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1746 hrtimer_switch_to_hres();
1747 return;
1748 }
1749
1750 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1751 now = hrtimer_update_base(cpu_base);
1752
1753 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1754 cpu_base->softirq_expires_next = KTIME_MAX;
1755 cpu_base->softirq_activated = 1;
1756 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1757 }
1758
1759 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1760 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1761}
1762
1763
1764
1765
1766static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1767{
1768 struct hrtimer_sleeper *t =
1769 container_of(timer, struct hrtimer_sleeper, timer);
1770 struct task_struct *task = t->task;
1771
1772 t->task = NULL;
1773 if (task)
1774 wake_up_process(task);
1775
1776 return HRTIMER_NORESTART;
1777}
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1788 enum hrtimer_mode mode)
1789{
1790
1791
1792
1793
1794
1795
1796
1797 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1798 mode |= HRTIMER_MODE_HARD;
1799
1800 hrtimer_start_expires(&sl->timer, mode);
1801}
1802EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1803
1804static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1805 clockid_t clock_id, enum hrtimer_mode mode)
1806{
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1827 if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1828 mode |= HRTIMER_MODE_HARD;
1829 }
1830
1831 __hrtimer_init(&sl->timer, clock_id, mode);
1832 sl->timer.function = hrtimer_wakeup;
1833 sl->task = current;
1834}
1835
1836
1837
1838
1839
1840
1841
1842void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1843 enum hrtimer_mode mode)
1844{
1845 debug_init(&sl->timer, clock_id, mode);
1846 __hrtimer_init_sleeper(sl, clock_id, mode);
1847
1848}
1849EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1850
1851int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1852{
1853 switch(restart->nanosleep.type) {
1854#ifdef CONFIG_COMPAT_32BIT_TIME
1855 case TT_COMPAT:
1856 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
1857 return -EFAULT;
1858 break;
1859#endif
1860 case TT_NATIVE:
1861 if (put_timespec64(ts, restart->nanosleep.rmtp))
1862 return -EFAULT;
1863 break;
1864 default:
1865 BUG();
1866 }
1867 return -ERESTART_RESTARTBLOCK;
1868}
1869
1870static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1871{
1872 struct restart_block *restart;
1873
1874 do {
1875 set_current_state(TASK_INTERRUPTIBLE);
1876 hrtimer_sleeper_start_expires(t, mode);
1877
1878 if (likely(t->task))
1879 freezable_schedule();
1880
1881 hrtimer_cancel(&t->timer);
1882 mode = HRTIMER_MODE_ABS;
1883
1884 } while (t->task && !signal_pending(current));
1885
1886 __set_current_state(TASK_RUNNING);
1887
1888 if (!t->task)
1889 return 0;
1890
	restart = &current->restart_block;
1892 if (restart->nanosleep.type != TT_NONE) {
1893 ktime_t rem = hrtimer_expires_remaining(&t->timer);
1894 struct timespec64 rmt;
1895
1896 if (rem <= 0)
1897 return 0;
1898 rmt = ktime_to_timespec64(rem);
1899
1900 return nanosleep_copyout(restart, &rmt);
1901 }
1902 return -ERESTART_RESTARTBLOCK;
1903}
1904
1905static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1906{
1907 struct hrtimer_sleeper t;
1908 int ret;
1909
1910 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1911 HRTIMER_MODE_ABS);
1912 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1913 ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1914 destroy_hrtimer_on_stack(&t.timer);
1915 return ret;
1916}
1917
1918long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
1919 const clockid_t clockid)
1920{
1921 struct restart_block *restart;
1922 struct hrtimer_sleeper t;
1923 int ret = 0;
1924 u64 slack;
1925
1926 slack = current->timer_slack_ns;
1927 if (dl_task(current) || rt_task(current))
1928 slack = 0;
1929
1930 hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1931 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
1932 ret = do_nanosleep(&t, mode);
1933 if (ret != -ERESTART_RESTARTBLOCK)
1934 goto out;
1935
1936
1937 if (mode == HRTIMER_MODE_ABS) {
1938 ret = -ERESTARTNOHAND;
1939 goto out;
1940 }
1941
	restart = &current->restart_block;
1943 restart->fn = hrtimer_nanosleep_restart;
1944 restart->nanosleep.clockid = t.timer.base->clockid;
1945 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1946out:
1947 destroy_hrtimer_on_stack(&t.timer);
1948 return ret;
1949}
1950
1951#ifdef CONFIG_64BIT
1952
1953SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
1954 struct __kernel_timespec __user *, rmtp)
1955{
1956 struct timespec64 tu;
1957
1958 if (get_timespec64(&tu, rqtp))
1959 return -EFAULT;
1960
1961 if (!timespec64_valid(&tu))
1962 return -EINVAL;
1963
1964 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1965 current->restart_block.nanosleep.rmtp = rmtp;
1966 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1967 CLOCK_MONOTONIC);
1968}
1969
1970#endif
1971
1972#ifdef CONFIG_COMPAT_32BIT_TIME
1973
1974SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
1975 struct old_timespec32 __user *, rmtp)
1976{
1977 struct timespec64 tu;
1978
1979 if (get_old_timespec32(&tu, rqtp))
1980 return -EFAULT;
1981
1982 if (!timespec64_valid(&tu))
1983 return -EINVAL;
1984
1985 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1986 current->restart_block.nanosleep.compat_rmtp = rmtp;
1987 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1988 CLOCK_MONOTONIC);
1989}
1990#endif
1991
1992
1993
1994
1995int hrtimers_prepare_cpu(unsigned int cpu)
1996{
1997 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1998 int i;
1999
2000 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2001 cpu_base->clock_base[i].cpu_base = cpu_base;
2002 timerqueue_init_head(&cpu_base->clock_base[i].active);
2003 }
2004
2005 cpu_base->cpu = cpu;
2006 cpu_base->active_bases = 0;
2007 cpu_base->hres_active = 0;
2008 cpu_base->hang_detected = 0;
2009 cpu_base->next_timer = NULL;
2010 cpu_base->softirq_next_timer = NULL;
2011 cpu_base->expires_next = KTIME_MAX;
2012 cpu_base->softirq_expires_next = KTIME_MAX;
2013 hrtimer_cpu_base_init_expiry_lock(cpu_base);
2014 return 0;
2015}
2016
2017#ifdef CONFIG_HOTPLUG_CPU
2018
2019static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
2020 struct hrtimer_clock_base *new_base)
2021{
2022 struct hrtimer *timer;
2023 struct timerqueue_node *node;
2024
2025 while ((node = timerqueue_getnext(&old_base->active))) {
2026 timer = container_of(node, struct hrtimer, node);
2027 BUG_ON(hrtimer_callback_running(timer));
2028 debug_deactivate(timer);
2029
2030
2031
2032
2033
2034
2035 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2036 timer->base = new_base;
2037
2038
2039
2040
2041
2042
2043
2044
2045 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2046 }
2047}
2048
2049int hrtimers_dead_cpu(unsigned int scpu)
2050{
2051 struct hrtimer_cpu_base *old_base, *new_base;
2052 int i;
2053
2054 BUG_ON(cpu_online(scpu));
2055 tick_cancel_sched_timer(scpu);
2056
2057
2058
2059
2060
2061
2062 local_bh_disable();
2063 local_irq_disable();
2064 old_base = &per_cpu(hrtimer_bases, scpu);
2065 new_base = this_cpu_ptr(&hrtimer_bases);
2066
2067
2068
2069
2070 raw_spin_lock(&new_base->lock);
2071 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2072
2073 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2074 migrate_hrtimer_list(&old_base->clock_base[i],
2075 &new_base->clock_base[i]);
2076 }
2077
2078
2079
2080
2081
2082 hrtimer_update_softirq_timer(new_base, false);
2083
2084 raw_spin_unlock(&old_base->lock);
2085 raw_spin_unlock(&new_base->lock);
2086
2087
2088 __hrtimer_peek_ahead_timers();
2089 local_irq_enable();
2090 local_bh_enable();
2091 return 0;
2092}
2093
2094#endif
2095
2096void __init hrtimers_init(void)
2097{
2098 hrtimers_prepare_cpu(smp_processor_id());
2099 open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
2100}
2101
2102
2103
2104
2105
2106
2107
2108
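/*
 * schedule_hrtimeout_range_clock - sleep until timeout on the given clock.
 * The caller must set the task state (e.g. TASK_INTERRUPTIBLE) beforehand.
 * *@expires == 0 returns immediately; a NULL @expires sleeps indefinitely
 * and just waits for a wakeup. Returns 0 when the timeout expired and
 * -EINTR when the task was woken up before that.
 */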
2109int __sched
2110schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2111 const enum hrtimer_mode mode, clockid_t clock_id)
2112{
2113 struct hrtimer_sleeper t;
2114
2115
2116
2117
2118
2119 if (expires && *expires == 0) {
2120 __set_current_state(TASK_RUNNING);
2121 return 0;
2122 }
2123
2124
2125
2126
2127 if (!expires) {
2128 schedule();
2129 return -EINTR;
2130 }
2131
2132 hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2133 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2134 hrtimer_sleeper_start_expires(&t, mode);
2135
2136 if (likely(t.task))
2137 schedule();
2138
2139 hrtimer_cancel(&t.timer);
2140 destroy_hrtimer_on_stack(&t.timer);
2141
2142 __set_current_state(TASK_RUNNING);
2143
2144 return !t.task ? 0 : -EINTR;
2145}
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
2180 const enum hrtimer_mode mode)
2181{
2182 return schedule_hrtimeout_range_clock(expires, delta, mode,
2183 CLOCK_MONOTONIC);
2184}
2185EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213int __sched schedule_hrtimeout(ktime_t *expires,
2214 const enum hrtimer_mode mode)
2215{
2216 return schedule_hrtimeout_range(expires, 0, mode);
2217}
2218EXPORT_SYMBOL_GPL(schedule_hrtimeout);
2219