/*
 * High-resolution kernel timers
 *
 * In contrast to the low-resolution timeout API, aka timer wheel,
 * hrtimers provide finer resolution and accuracy depending on system
 * configuration and capabilities.
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"

/*
 * Masks for selecting the soft and hard context timers from
 * cpu_base->active_bases.
 */
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)

/*
 * The timer bases: one hard and one soft base per clock. The soft bases
 * are expired from the HRTIMER_SOFTIRQ softirq, the hard ones directly
 * from the timer interrupt.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
		{
			.index = HRTIMER_BASE_MONOTONIC_SOFT,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
		},
		{
			.index = HRTIMER_BASE_REALTIME_SOFT,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME_SOFT,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
		},
		{
			.index = HRTIMER_BASE_TAI_SOFT,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	/* Make sure we catch unsupported clockids */
	[0 ... MAX_CLOCKS - 1]	= HRTIMER_MAX_CLOCK_BASES,

	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place.
 */
#ifdef CONFIG_SMP

/*
 * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
 * such that hrtimer_callback_running() can unconditionally dereference
 * timer->base->cpu_base.
 */
static struct hrtimer_cpu_base migration_cpu_base = {
	.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};

#define migration_base	migration_cpu_base.clock_base[0]

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return base == &migration_base;
}

/*
 * We are using hashed locking: Holding per_cpu(hrtimer_bases[x].lock)
 * means that all timers which are tied to this base are locked, and the
 * base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found in the base->active queues.
 *
 * When the timer's base is locked, and the timer removed from the queue,
 * it is possible to set timer->base = &migration_base and drop the lock:
 * the timer remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = READ_ONCE(timer->base);
		if (likely(base != &migration_base)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * We do not migrate the timer when it is expiring before the next
 * event on the target cpu. When high resolution is enabled, we cannot
 * reprogram the target cpu hardware and we would cause it to fire
 * late. To keep it simple, we handle the high resolution enabled and
 * disabled case similar.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
	ktime_t expires;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires < new_base->cpu_base->expires_next;
}

static inline
struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
					 int pinned)
{
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
	if (static_branch_likely(&timers_migration_enabled) && !pinned)
		return &per_cpu(hrtimer_bases, get_nohz_timer_target());
#endif
	return base;
}

/*
 * We switch the timer base to a power-optimized selected CPU target,
 * if:
 *	- NO_HZ_COMMON is enabled
 *	- timer migration is enabled
 *	- the timer callback is not running
 *	- the timer is not the first expiring timer on the new target
 *
 * If one of the above requirements is not fulfilled we move the timer
 * to the current CPU or leave it on the previously assigned CPU if
 * the timer callback is currently running.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
	struct hrtimer_clock_base *new_base;
	int basenum = base->index;

	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
	new_cpu_base = get_target_base(this_cpu_base, pinned);
again:
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		WRITE_ONCE(timer->base, &migration_base);
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			new_cpu_base = this_cpu_base;
			WRITE_ONCE(timer->base, base);
			goto again;
		}
		WRITE_ONCE(timer->base, new_base);
	} else {
		if (new_cpu_base != this_cpu_base &&
		    hrtimer_check_target(timer, new_base)) {
			new_cpu_base = this_cpu_base;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline bool is_migration_base(struct hrtimer_clock_base *base)
{
	return false;
}

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */

/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
/*
 * Divide a ktime value by a nanosecond value
 */
s64 __ktime_divns(const ktime_t kt, s64 div)
{
	int sft = 0;
	s64 dclc;
	u64 tmp;

	dclc = ktime_to_ns(kt);
	tmp = dclc < 0 ? -dclc : dclc;

	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	tmp >>= sft;
	do_div(tmp, (unsigned long) div);
	return dclc < 0 ? -tmp : tmp;
}
EXPORT_SYMBOL_GPL(__ktime_divns);
#endif /* BITS_PER_LONG >= 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add_unsafe(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res < 0 || res < lhs || res < rhs)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
338
339#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
340
341static struct debug_obj_descr hrtimer_debug_descr;
342
343static void *hrtimer_debug_hint(void *addr)
344{
345 return ((struct hrtimer *) addr)->function;
346}
347
348
349
350
351
352static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
353{
354 struct hrtimer *timer = addr;
355
356 switch (state) {
357 case ODEBUG_STATE_ACTIVE:
358 hrtimer_cancel(timer);
359 debug_object_init(timer, &hrtimer_debug_descr);
360 return true;
361 default:
362 return false;
363 }
364}
365
366
367
368
369
370
371static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
372{
373 switch (state) {
374 case ODEBUG_STATE_ACTIVE:
375 WARN_ON(1);
		/* fall through */
377 default:
378 return false;
379 }
380}
381
382
383
384
385
386static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
387{
388 struct hrtimer *timer = addr;
389
390 switch (state) {
391 case ODEBUG_STATE_ACTIVE:
392 hrtimer_cancel(timer);
393 debug_object_free(timer, &hrtimer_debug_descr);
394 return true;
395 default:
396 return false;
397 }
398}
399
400static struct debug_obj_descr hrtimer_debug_descr = {
401 .name = "hrtimer",
402 .debug_hint = hrtimer_debug_hint,
403 .fixup_init = hrtimer_fixup_init,
404 .fixup_activate = hrtimer_fixup_activate,
405 .fixup_free = hrtimer_fixup_free,
406};
407
408static inline void debug_hrtimer_init(struct hrtimer *timer)
409{
410 debug_object_init(timer, &hrtimer_debug_descr);
411}
412
413static inline void debug_hrtimer_activate(struct hrtimer *timer,
414 enum hrtimer_mode mode)
415{
416 debug_object_activate(timer, &hrtimer_debug_descr);
417}
418
419static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
420{
421 debug_object_deactivate(timer, &hrtimer_debug_descr);
422}
423
424static inline void debug_hrtimer_free(struct hrtimer *timer)
425{
426 debug_object_free(timer, &hrtimer_debug_descr);
427}
428
429static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
430 enum hrtimer_mode mode);
431
432void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
433 enum hrtimer_mode mode)
434{
435 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
436 __hrtimer_init(timer, clock_id, mode);
437}
438EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
439
440static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
441 clockid_t clock_id, enum hrtimer_mode mode);
442
443void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
444 clockid_t clock_id, enum hrtimer_mode mode)
445{
446 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
447 __hrtimer_init_sleeper(sl, clock_id, mode);
448}
449EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
450
451void destroy_hrtimer_on_stack(struct hrtimer *timer)
452{
453 debug_object_free(timer, &hrtimer_debug_descr);
454}
455EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
456
457#else
458
459static inline void debug_hrtimer_init(struct hrtimer *timer) { }
460static inline void debug_hrtimer_activate(struct hrtimer *timer,
461 enum hrtimer_mode mode) { }
462static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
463#endif
464
465static inline void
466debug_init(struct hrtimer *timer, clockid_t clockid,
467 enum hrtimer_mode mode)
468{
469 debug_hrtimer_init(timer);
470 trace_hrtimer_init(timer, clockid, mode);
471}
472
473static inline void debug_activate(struct hrtimer *timer,
474 enum hrtimer_mode mode)
475{
476 debug_hrtimer_activate(timer, mode);
477 trace_hrtimer_start(timer, mode);
478}
479
480static inline void debug_deactivate(struct hrtimer *timer)
481{
482 debug_hrtimer_deactivate(timer);
483 trace_hrtimer_cancel(timer);
484}
485
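/*
 * Return the clock base for the lowest set bit in @active and clear that
 * bit, so repeated calls walk all currently active bases.
 */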
486static struct hrtimer_clock_base *
487__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
488{
489 unsigned int idx;
490
491 if (!*active)
492 return NULL;
493
494 idx = __ffs(*active);
495 *active &= ~(1U << idx);
496
497 return &cpu_base->clock_base[idx];
498}
499
500#define for_each_active_base(base, cpu_base, active) \
501 while ((base = __next_base((cpu_base), &(active))))
502
503static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
504 const struct hrtimer *exclude,
505 unsigned int active,
506 ktime_t expires_next)
507{
508 struct hrtimer_clock_base *base;
509 ktime_t expires;
510
511 for_each_active_base(base, cpu_base, active) {
512 struct timerqueue_node *next;
513 struct hrtimer *timer;
514
515 next = timerqueue_getnext(&base->active);
516 timer = container_of(next, struct hrtimer, node);
517 if (timer == exclude) {
518
519 next = timerqueue_iterate_next(next);
520 if (!next)
521 continue;
522
523 timer = container_of(next, struct hrtimer, node);
524 }
525 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
526 if (expires < expires_next) {
527 expires_next = expires;
528
529
530 if (exclude)
531 continue;
532
533 if (timer->is_soft)
534 cpu_base->softirq_next_timer = timer;
535 else
536 cpu_base->next_timer = timer;
537 }
538 }

	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the result might be negative. Fix it up
	 * to prevent a false positive in clockevents_program_event().
	 */
544 if (expires_next < 0)
545 expires_next = 0;
546 return expires_next;
547}

/*
 * Recomputes cpu_base::*next_timer and returns the earliest expires_next
 * but does not set cpu_base::*expires_next, that is done by
 * hrtimer[_force]_reprogram and hrtimer_interrupt only. When updating
 * cpu_base::*expires_next right away, reprogramming logic would no longer
 * work.
 *
 * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases;
 * those timers will get run whenever the softirq gets handled, at the end
 * of hrtimer_run_softirq(), and hrtimer_update_softirq_timer() will re-add
 * these bases.
 *
 * @active_mask must be one of:
 *  - HRTIMER_ACTIVE_ALL,
 *  - HRTIMER_ACTIVE_SOFT, or
 *  - HRTIMER_ACTIVE_HARD.
 */
566static ktime_t
567__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
568{
569 unsigned int active;
570 struct hrtimer *next_timer = NULL;
571 ktime_t expires_next = KTIME_MAX;
572
573 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
574 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
575 cpu_base->softirq_next_timer = NULL;
576 expires_next = __hrtimer_next_event_base(cpu_base, NULL,
577 active, KTIME_MAX);
578
579 next_timer = cpu_base->softirq_next_timer;
580 }
581
582 if (active_mask & HRTIMER_ACTIVE_HARD) {
583 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
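		/*
		 * Seed next_timer with the softirq candidate (if any) so the
		 * scan of the hard bases below only overrides it when a hard
		 * timer expires earlier.
		 */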
584 cpu_base->next_timer = next_timer;
585 expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
586 expires_next);
587 }
588
589 return expires_next;
590}
591
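/*
 * Sync the hard clock base offsets (realtime, boottime, TAI) with the
 * timekeeper, mirror them into the corresponding soft bases and return
 * the current monotonic time.
 */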
592static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
593{
594 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
595 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
596 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
597
598 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
599 offs_real, offs_boot, offs_tai);
600
601 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
602 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
603 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
604
605 return now;
606}

/*
 * Is the high resolution mode active ?
 */
611static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
612{
613 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
614 cpu_base->hres_active : 0;
615}
616
617static inline int hrtimer_hres_active(void)
618{
619 return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
620}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
627static void
628hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
629{
630 ktime_t expires_next;
631
632
633
634
635 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
636
637 if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
638
639
640
641
642
643 if (cpu_base->softirq_activated)
644 expires_next = __hrtimer_get_next_event(cpu_base,
645 HRTIMER_ACTIVE_HARD);
646 else
647 cpu_base->softirq_expires_next = expires_next;
648 }
649
650 if (skip_equal && expires_next == cpu_base->expires_next)
651 return;
652
653 cpu_base->expires_next = expires_next;

	/*
	 * If hres is not active, hardware does not have to be
	 * reprogrammed yet.
	 *
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
672 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
673 return;
674
675 tick_program_event(cpu_base->expires_next, 1);
676}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
684static bool hrtimer_hres_enabled __read_mostly = true;
685unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
686EXPORT_SYMBOL_GPL(hrtimer_resolution);

/*
 * Enable / Disable high resolution mode
 */
691static int __init setup_hrtimer_hres(char *str)
692{
693 return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
694}
695
696__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_high_res_enabled - query, if the highres mode is enabled
 */
701static inline int hrtimer_is_hres_enabled(void)
702{
703 return hrtimer_hres_enabled;
704}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
711static void retrigger_next_event(void *arg)
712{
713 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
714
715 if (!__hrtimer_hres_active(base))
716 return;
717
718 raw_spin_lock(&base->lock);
719 hrtimer_update_base(base);
720 hrtimer_force_reprogram(base, 0);
721 raw_spin_unlock(&base->lock);
722}

/*
 * Switch to high resolution mode
 */
727static void hrtimer_switch_to_hres(void)
728{
729 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
730
731 if (tick_init_highres()) {
732 pr_warn("Could not switch to high resolution mode on CPU %u\n",
733 base->cpu);
734 return;
735 }
736 base->hres_active = 1;
737 hrtimer_resolution = HIGH_RES_NSEC;
738
739 tick_setup_sched_timer();
740
741 retrigger_next_event(NULL);
742}
743
744static void clock_was_set_work(struct work_struct *work)
745{
746 clock_was_set();
747}
748
749static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
755void clock_was_set_delayed(void)
756{
757 schedule_work(&hrtimer_work);
758}
759
760#else
761
762static inline int hrtimer_is_hres_enabled(void) { return 0; }
763static inline void hrtimer_switch_to_hres(void) { }
764static inline void retrigger_next_event(void *arg) { }
765
766#endif

/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
775static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
776{
777 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
778 struct hrtimer_clock_base *base = timer->base;
779 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
780
781 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
782
783
784
785
786
787 if (expires < 0)
788 expires = 0;
789
790 if (timer->is_soft) {
791
792
793
794
795
796
797
798 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
799
800 if (timer_cpu_base->softirq_activated)
801 return;
802
803 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
804 return;
805
806 timer_cpu_base->softirq_next_timer = timer;
807 timer_cpu_base->softirq_expires_next = expires;
808
809 if (!ktime_before(expires, timer_cpu_base->expires_next) ||
810 !reprogram)
811 return;
812 }
813
814
815
816
817
818 if (base->cpu_base != cpu_base)
819 return;
820
821
822
823
824
825
826
827
828 if (cpu_base->in_hrtirq)
829 return;
830
831 if (expires >= cpu_base->expires_next)
832 return;
833
834
835 cpu_base->next_timer = timer;
836 cpu_base->expires_next = expires;
837
838
839
840
841
842
843
844
845
846
847 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
848 return;
849
850
851
852
853
854 tick_program_event(expires, 1);
855}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
868void clock_was_set(void)
869{
870#ifdef CONFIG_HIGH_RES_TIMERS
871
872 on_each_cpu(retrigger_next_event, NULL, 1);
873#endif
874 timerfd_clock_was_set();
875}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs. However, all other CPUs will be
 * stopped with interrupts disabled so the clock_was_set() call
 * must be deferred.
 */
883void hrtimers_resume(void)
884{
885 lockdep_assert_irqs_disabled();
886
887 retrigger_next_event(NULL);
888
889 clock_was_set_delayed();
890}
891
892
893
894
895static inline
896void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
897{
898 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
899}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 *
 * Can be safely called from the callback function of @timer. If
 * called from other contexts @timer must neither be enqueued nor
 * running the callback and the caller needs to take care of
 * serialization.
 *
 * Note: This only updates the timer expiry value and does not requeue
 * the timer.
 */
918u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
919{
920 u64 orun = 1;
921 ktime_t delta;
922
923 delta = ktime_sub(now, hrtimer_get_expires(timer));
924
925 if (delta < 0)
926 return 0;
927
928 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
929 return 0;
930
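	/* Never forward by less than the timer resolution. */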
931 if (interval < hrtimer_resolution)
932 interval = hrtimer_resolution;
933
934 if (unlikely(delta >= interval)) {
935 s64 incr = ktime_to_ns(interval);
936
937 orun = ktime_divns(delta, incr);
938 hrtimer_add_expires_ns(timer, incr * orun);
939 if (hrtimer_get_expires_tv64(timer) > now)
940 return orun;
941
942
943
944
945 orun++;
946 }
947 hrtimer_add_expires(timer, interval);
948
949 return orun;
950}
951EXPORT_SYMBOL_GPL(hrtimer_forward);

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is done via the timerqueue code, which also updates
 * the rbtree's leftmost (next expiring) node.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
961static int enqueue_hrtimer(struct hrtimer *timer,
962 struct hrtimer_clock_base *base,
963 enum hrtimer_mode mode)
964{
965 debug_activate(timer, mode);
966
967 base->cpu_base->active_bases |= 1 << base->index;
968
969 timer->state = HRTIMER_STATE_ENQUEUED;
970
971 return timerqueue_add(&base->active, &timer->node);
972}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
984static void __remove_hrtimer(struct hrtimer *timer,
985 struct hrtimer_clock_base *base,
986 u8 newstate, int reprogram)
987{
988 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
989 u8 state = timer->state;
990
991 timer->state = newstate;
992 if (!(state & HRTIMER_STATE_ENQUEUED))
993 return;
994
995 if (!timerqueue_del(&base->active, &timer->node))
996 cpu_base->active_bases &= ~(1 << base->index);
997
998
999
1000
1001
1002
1003
1004
1005
1006 if (reprogram && timer == cpu_base->next_timer)
1007 hrtimer_force_reprogram(cpu_base, 1);
1008}

/*
 * remove hrtimer, called with base lock held
 */
1013static inline int
1014remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
1015{
1016 if (hrtimer_is_queued(timer)) {
1017 u8 state = timer->state;
1018 int reprogram;
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028 debug_deactivate(timer);
1029 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1030
1031 if (!restart)
1032 state = HRTIMER_STATE_INACTIVE;
1033
1034 __remove_hrtimer(timer, base, state, reprogram);
1035 return 1;
1036 }
1037 return 0;
1038}
1039
1040static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1041 const enum hrtimer_mode mode)
1042{
1043#ifdef CONFIG_TIME_LOW_RES
	/*
	 * CONFIG_TIME_LOW_RES indicates that the system has no way to return
	 * granular time values. For relative timers we add hrtimer_resolution
	 * (i.e. one jiffie) to prevent short timeouts.
	 */
1049 timer->is_rel = mode & HRTIMER_MODE_REL;
1050 if (timer->is_rel)
1051 tim = ktime_add_safe(tim, hrtimer_resolution);
1052#endif
1053 return tim;
1054}
1055
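/*
 * Find the next expiring timer on the soft clock bases and hand it to
 * hrtimer_reprogram(), which updates softirq_expires_next and, when
 * @reprogram is set, the clock event device if necessary.
 */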
1056static void
1057hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
1058{
1059 ktime_t expires;
1060
1061
1062
1063
1064 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
1065
1066
1067
1068
1069
1070
1071 if (expires == KTIME_MAX)
1072 return;
1073
1074
1075
1076
1077
1078 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1079}
1080
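/*
 * Common (re)start path: remove the timer if it is queued, compute the
 * absolute expiry, switch to the target clock base and enqueue. Returns
 * nonzero when the timer is the new first expiring timer of its clock base.
 */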
1081static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1082 u64 delta_ns, const enum hrtimer_mode mode,
1083 struct hrtimer_clock_base *base)
1084{
1085 struct hrtimer_clock_base *new_base;
1086
1087
1088 remove_hrtimer(timer, base, true);
1089
1090 if (mode & HRTIMER_MODE_REL)
1091 tim = ktime_add_safe(tim, base->get_time());
1092
1093 tim = hrtimer_update_lowres(timer, tim, mode);
1094
1095 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1096
1097
1098 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
1099
1100 return enqueue_hrtimer(timer, new_base, mode);
1101}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
 *		softirq based mode is considered for debug purpose only!
 */
1112void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1113 u64 delta_ns, const enum hrtimer_mode mode)
1114{
1115 struct hrtimer_clock_base *base;
1116 unsigned long flags;
1117
1118
1119
1120
1121
1122
1123 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1124 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1125 else
1126 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1127
1128 base = lock_hrtimer_base(timer, &flags);
1129
1130 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1131 hrtimer_reprogram(timer, true);
1132
1133 unlock_hrtimer_base(timer, &flags);
1134}
1135EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *
 *  *  0 when the timer was not active
 *  *  1 when the timer was active
 *  * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
1148int hrtimer_try_to_cancel(struct hrtimer *timer)
1149{
1150 struct hrtimer_clock_base *base;
1151 unsigned long flags;
1152 int ret = -1;
1153
1154
1155
1156
1157
1158
1159
1160 if (!hrtimer_active(timer))
1161 return 0;
1162
1163 base = lock_hrtimer_base(timer, &flags);
1164
1165 if (!hrtimer_callback_running(timer))
1166 ret = remove_hrtimer(timer, base, false);
1167
1168 unlock_hrtimer_base(timer, &flags);
1169
1170 return ret;
1171
1172}
1173EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1174
1175#ifdef CONFIG_PREEMPT_RT
1176static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
1177{
1178 spin_lock_init(&base->softirq_expiry_lock);
1179}
1180
1181static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
1182{
1183 spin_lock(&base->softirq_expiry_lock);
1184}
1185
1186static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
1187{
1188 spin_unlock(&base->softirq_expiry_lock);
1189}

/*
 * The counterpart to hrtimer_cancel_wait_running().
 *
 * If there is a waiter for cpu_base->expiry_lock, then it was waiting for
 * the timer callback to finish. Drop expiry_lock and reacquire it. That
 * allows the waiter to acquire the lock and make progress.
 */
1198static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
1199 unsigned long flags)
1200{
1201 if (atomic_read(&cpu_base->timer_waiters)) {
1202 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1203 spin_unlock(&cpu_base->softirq_expiry_lock);
1204 spin_lock(&cpu_base->softirq_expiry_lock);
1205 raw_spin_lock_irq(&cpu_base->lock);
1206 }
1207}

/**
 * hrtimer_cancel_wait_running - Wait for the callback of a soft hrtimer
 * @timer:	Timer to wait for
 *
 * Waits until the softirq based callback of @timer, which might currently
 * run on another CPU, has completed. For timers which expire in hard
 * interrupt context and for timers parked on the migration base this just
 * relaxes the CPU and returns, as those callbacks cannot be waited for
 * this way.
 *
 * Used by hrtimer_cancel() on PREEMPT_RT to avoid busy looping while the
 * softirq thread which runs the callback is preempted.
 */
1225void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1226{
1227
1228 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1229
1230
1231
1232
1233
1234 if (!timer->is_soft || is_migration_base(base)) {
1235 cpu_relax();
1236 return;
1237 }
1238
1239
1240
1241
1242
1243
1244
1245
1246 atomic_inc(&base->cpu_base->timer_waiters);
1247 spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1248 atomic_dec(&base->cpu_base->timer_waiters);
1249 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1250}
1251#else
1252static inline void
1253hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
1254static inline void
1255hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
1256static inline void
1257hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
1258static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
1259 unsigned long flags) { }
1260#endif

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
1270int hrtimer_cancel(struct hrtimer *timer)
1271{
1272 int ret;
1273
1274 do {
1275 ret = hrtimer_try_to_cancel(timer);
1276
1277 if (ret < 0)
1278 hrtimer_cancel_wait_running(timer);
1279 } while (ret < 0);
1280 return ret;
1281}
1282EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * __hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 * @adjust:	adjust relative timers when CONFIG_TIME_LOW_RES=y
 */
1289ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1290{
1291 unsigned long flags;
1292 ktime_t rem;
1293
1294 lock_hrtimer_base(timer, &flags);
1295 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1296 rem = hrtimer_expires_remaining_adjusted(timer);
1297 else
1298 rem = hrtimer_expires_remaining(timer);
1299 unlock_hrtimer_base(timer, &flags);
1300
1301 return rem;
1302}
1303EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
1311u64 hrtimer_get_next_event(void)
1312{
1313 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1314 u64 expires = KTIME_MAX;
1315 unsigned long flags;
1316
1317 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1318
1319 if (!__hrtimer_hres_active(cpu_base))
1320 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1321
1322 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1323
1324 return expires;
1325}

/**
 * hrtimer_next_event_without - time until next expiry event w/o one timer
 * @exclude:	timer to exclude
 *
 * Returns the next expiry time over all timers except for the @exclude one or
 * KTIME_MAX if none of them is pending.
 */
1334u64 hrtimer_next_event_without(const struct hrtimer *exclude)
1335{
1336 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1337 u64 expires = KTIME_MAX;
1338 unsigned long flags;
1339
1340 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1341
1342 if (__hrtimer_hres_active(cpu_base)) {
1343 unsigned int active;
1344
1345 if (!cpu_base->softirq_activated) {
1346 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1347 expires = __hrtimer_next_event_base(cpu_base, exclude,
1348 active, KTIME_MAX);
1349 }
1350 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1351 expires = __hrtimer_next_event_base(cpu_base, exclude, active,
1352 expires);
1353 }
1354
1355 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1356
1357 return expires;
1358}
1359#endif
1360
1361static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1362{
1363 if (likely(clock_id < MAX_CLOCKS)) {
1364 int base = hrtimer_clock_to_base_table[clock_id];
1365
1366 if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1367 return base;
1368 }
1369 WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1370 return HRTIMER_BASE_MONOTONIC;
1371}
1372
1373static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1374 enum hrtimer_mode mode)
1375{
1376 bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
1377 struct hrtimer_cpu_base *cpu_base;
1378 int base;

	/*
	 * On PREEMPT_RT enabled kernels hrtimers which are not explicitly
	 * marked for hard interrupt expiry mode are moved into soft
	 * interrupt context for latency reasons and because the callbacks
	 * can invoke functions which might sleep on RT, e.g. spin_lock().
	 */
1386 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
1387 softtimer = true;
1388
1389 memset(timer, 0, sizeof(struct hrtimer));
1390
1391 cpu_base = raw_cpu_ptr(&hrtimer_bases);
1392
1393
1394
1395
1396
1397
1398 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1399 clock_id = CLOCK_MONOTONIC;
1400
1401 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
1402 base += hrtimer_clockid_to_base(clock_id);
1403 timer->is_soft = softtimer;
1404 timer->is_hard = !softtimer;
1405 timer->base = &cpu_base->clock_base[base];
1406 timerqueue_init(&timer->node);
1407}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:       The modes which are relevant for initialization:
 *              HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
 *              HRTIMER_MODE_REL_SOFT
 *
 *              The PINNED variants of the above can be handed in,
 *              but the PINNED bit is ignored as pinning happens
 *              when the hrtimer is started
 */
1421void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1422 enum hrtimer_mode mode)
1423{
1424 debug_init(timer, clock_id, mode);
1425 __hrtimer_init(timer, clock_id, mode);
1426}
1427EXPORT_SYMBOL_GPL(hrtimer_init);

/*
 * A timer is active, when it is enqueued into the rbtree or the
 * callback function is running or it's in the state of being migrated
 * to another cpu.
 *
 * It is important for this function to not return a false negative.
 */
1436bool hrtimer_active(const struct hrtimer *timer)
1437{
1438 struct hrtimer_clock_base *base;
1439 unsigned int seq;
1440
1441 do {
1442 base = READ_ONCE(timer->base);
1443 seq = raw_read_seqcount_begin(&base->seq);
1444
1445 if (timer->state != HRTIMER_STATE_INACTIVE ||
1446 base->running == timer)
1447 return true;
1448
1449 } while (read_seqcount_retry(&base->seq, seq) ||
1450 base != READ_ONCE(timer->base));
1451
1452 return false;
1453}
1454EXPORT_SYMBOL_GPL(hrtimer_active);

/*
 * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
 * distinct sections:
 *
 *  - queued:	the timer is queued
 *  - callback:	the timer is being ran
 *  - post:	the timer is inactive or (re)queued
 *
 * On the read side we ensure we observe timer->state and cpu_base->running
 * from the same section, if anything changed while we looked at it, we retry.
 * This includes timer->base changing because sequence numbers alone are
 * insufficient for that.
 *
 * The sequence numbers are required because otherwise we could still observe
 * a false negative if the read side got smeared over multiple consecutive
 * __run_hrtimer() invocations.
 */
1474static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1475 struct hrtimer_clock_base *base,
1476 struct hrtimer *timer, ktime_t *now,
1477 unsigned long flags)
1478{
1479 enum hrtimer_restart (*fn)(struct hrtimer *);
1480 int restart;
1481
1482 lockdep_assert_held(&cpu_base->lock);
1483
1484 debug_deactivate(timer);
1485 base->running = timer;
1486
1487
1488
1489
1490
1491
1492
1493
1494 raw_write_seqcount_barrier(&base->seq);
1495
1496 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1497 fn = timer->function;
1498
1499
1500
1501
1502
1503
1504 if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1505 timer->is_rel = false;
1506
1507
1508
1509
1510
1511
1512 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1513 trace_hrtimer_expire_entry(timer, now);
1514 restart = fn(timer);
1515 trace_hrtimer_expire_exit(timer);
1516 raw_spin_lock_irq(&cpu_base->lock);
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527 if (restart != HRTIMER_NORESTART &&
1528 !(timer->state & HRTIMER_STATE_ENQUEUED))
1529 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1530
1531
1532
1533
1534
1535
1536
1537
1538 raw_write_seqcount_barrier(&base->seq);
1539
1540 WARN_ON_ONCE(base->running != timer);
1541 base->running = NULL;
1542}
1543
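/*
 * Expire all timers on the clock bases selected by @active_mask whose soft
 * expiry time has been reached at @now.
 */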
1544static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1545 unsigned long flags, unsigned int active_mask)
1546{
1547 struct hrtimer_clock_base *base;
1548 unsigned int active = cpu_base->active_bases & active_mask;
1549
1550 for_each_active_base(base, cpu_base, active) {
1551 struct timerqueue_node *node;
1552 ktime_t basenow;
1553
1554 basenow = ktime_add(now, base->offset);
1555
1556 while ((node = timerqueue_getnext(&base->active))) {
1557 struct hrtimer *timer;
1558
1559 timer = container_of(node, struct hrtimer, node);
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573 if (basenow < hrtimer_get_softexpires_tv64(timer))
1574 break;
1575
1576 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1577 if (active_mask == HRTIMER_ACTIVE_SOFT)
1578 hrtimer_sync_wait_running(cpu_base, flags);
1579 }
1580 }
1581}
1582
1583static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1584{
1585 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1586 unsigned long flags;
1587 ktime_t now;
1588
1589 hrtimer_cpu_base_lock_expiry(cpu_base);
1590 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1591
1592 now = hrtimer_update_base(cpu_base);
1593 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
1594
1595 cpu_base->softirq_activated = 0;
1596 hrtimer_update_softirq_timer(cpu_base, true);
1597
1598 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1599 hrtimer_cpu_base_unlock_expiry(cpu_base);
1600}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
1608void hrtimer_interrupt(struct clock_event_device *dev)
1609{
1610 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1611 ktime_t expires_next, now, entry_time, delta;
1612 unsigned long flags;
1613 int retries = 0;
1614
1615 BUG_ON(!cpu_base->hres_active);
1616 cpu_base->nr_events++;
1617 dev->next_event = KTIME_MAX;
1618
1619 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1620 entry_time = now = hrtimer_update_base(cpu_base);
1621retry:
1622 cpu_base->in_hrtirq = 1;
1623
1624
1625
1626
1627
1628
1629
1630 cpu_base->expires_next = KTIME_MAX;
1631
1632 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1633 cpu_base->softirq_expires_next = KTIME_MAX;
1634 cpu_base->softirq_activated = 1;
1635 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1636 }
1637
1638 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1639
1640
1641 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1642
1643
1644
1645
1646 cpu_base->expires_next = expires_next;
1647 cpu_base->in_hrtirq = 0;
1648 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1649
1650
1651 if (!tick_program_event(expires_next, 0)) {
1652 cpu_base->hang_detected = 0;
1653 return;
1654 }
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1670 now = hrtimer_update_base(cpu_base);
1671 cpu_base->nr_retries++;
1672 if (++retries < 3)
1673 goto retry;
1674
1675
1676
1677
1678
1679
1680 cpu_base->nr_hangs++;
1681 cpu_base->hang_detected = 1;
1682 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1683
1684 delta = ktime_sub(now, entry_time);
1685 if ((unsigned int)delta > cpu_base->max_hang_time)
1686 cpu_base->max_hang_time = (unsigned int) delta;
1687
1688
1689
1690
1691 if (delta > 100 * NSEC_PER_MSEC)
1692 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1693 else
1694 expires_next = ktime_add(now, delta);
1695 tick_program_event(expires_next, 1);
1696 pr_warn_once("hrtimer: interrupt took %llu ns\n", ktime_to_ns(delta));
1697}
1698
/* called with interrupts disabled */
1700static inline void __hrtimer_peek_ahead_timers(void)
1701{
1702 struct tick_device *td;
1703
1704 if (!hrtimer_hres_active())
1705 return;
1706
1707 td = this_cpu_ptr(&tick_cpu_device);
1708 if (td && td->evtdev)
1709 hrtimer_interrupt(td->evtdev);
1710}
1711
1712#else
1713
1714static inline void __hrtimer_peek_ahead_timers(void) { }
1715
1716#endif

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
1721void hrtimer_run_queues(void)
1722{
1723 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1724 unsigned long flags;
1725 ktime_t now;
1726
1727 if (__hrtimer_hres_active(cpu_base))
1728 return;
1729
1730
1731
1732
1733
1734
1735
1736
1737 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1738 hrtimer_switch_to_hres();
1739 return;
1740 }
1741
1742 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1743 now = hrtimer_update_base(cpu_base);
1744
1745 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1746 cpu_base->softirq_expires_next = KTIME_MAX;
1747 cpu_base->softirq_activated = 1;
1748 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1749 }
1750
1751 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1752 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1753}
1754
1755
1756
1757
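/*
 * Timer callback for hrtimer_sleeper: wake up the task which armed the
 * sleeper, if it is still waiting.
 */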
1758static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1759{
1760 struct hrtimer_sleeper *t =
1761 container_of(timer, struct hrtimer_sleeper, timer);
1762 struct task_struct *task = t->task;
1763
1764 t->task = NULL;
1765 if (task)
1766 wake_up_process(task);
1767
1768 return HRTIMER_NORESTART;
1769}

/**
 * hrtimer_sleeper_start_expires - Start a hrtimer sleeper timer
 * @sl:		sleeper to be started
 * @mode:	timer mode abs/rel
 *
 * Wrapper around hrtimer_start_expires() for hrtimer_sleeper based timers
 * to allow PREEMPT_RT to tweak the delivery mode (soft/hardirq context)
 */
1779void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1780 enum hrtimer_mode mode)
1781{
1782
1783
1784
1785
1786
1787
1788
1789 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1790 mode |= HRTIMER_MODE_HARD;
1791
1792 hrtimer_start_expires(&sl->timer, mode);
1793}
1794EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1795
1796static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1797 clockid_t clock_id, enum hrtimer_mode mode)
1798{
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1819 if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1820 mode |= HRTIMER_MODE_HARD;
1821 }
1822
1823 __hrtimer_init(&sl->timer, clock_id, mode);
1824 sl->timer.function = hrtimer_wakeup;
1825 sl->task = current;
1826}

/**
 * hrtimer_init_sleeper - initialize sleeper to the given clock
 * @sl:		sleeper to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
1834void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1835 enum hrtimer_mode mode)
1836{
1837 debug_init(&sl->timer, clock_id, mode);
1838 __hrtimer_init_sleeper(sl, clock_id, mode);
1839
1840}
1841EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1842
1843int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1844{
1845 switch(restart->nanosleep.type) {
1846#ifdef CONFIG_COMPAT_32BIT_TIME
1847 case TT_COMPAT:
1848 if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
1849 return -EFAULT;
1850 break;
1851#endif
1852 case TT_NATIVE:
1853 if (put_timespec64(ts, restart->nanosleep.rmtp))
1854 return -EFAULT;
1855 break;
1856 default:
1857 BUG();
1858 }
1859 return -ERESTART_RESTARTBLOCK;
1860}
1861
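/*
 * Sleep loop for nanosleep: (re)arm the sleeper and schedule until the timer
 * fires or a signal is pending; report the remaining time when interrupted
 * and a restart is required.
 */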
1862static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1863{
1864 struct restart_block *restart;
1865
1866 do {
1867 set_current_state(TASK_INTERRUPTIBLE);
1868 hrtimer_sleeper_start_expires(t, mode);
1869
1870 if (likely(t->task))
1871 freezable_schedule();
1872
1873 hrtimer_cancel(&t->timer);
1874 mode = HRTIMER_MODE_ABS;
1875
1876 } while (t->task && !signal_pending(current));
1877
1878 __set_current_state(TASK_RUNNING);
1879
1880 if (!t->task)
1881 return 0;
1882
	restart = &current->restart_block;
1884 if (restart->nanosleep.type != TT_NONE) {
1885 ktime_t rem = hrtimer_expires_remaining(&t->timer);
1886 struct timespec64 rmt;
1887
1888 if (rem <= 0)
1889 return 0;
1890 rmt = ktime_to_timespec64(rem);
1891
1892 return nanosleep_copyout(restart, &rmt);
1893 }
1894 return -ERESTART_RESTARTBLOCK;
1895}
1896
1897static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1898{
1899 struct hrtimer_sleeper t;
1900 int ret;
1901
1902 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1903 HRTIMER_MODE_ABS);
1904 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1905 ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1906 destroy_hrtimer_on_stack(&t.timer);
1907 return ret;
1908}
1909
1910long hrtimer_nanosleep(const struct timespec64 *rqtp,
1911 const enum hrtimer_mode mode, const clockid_t clockid)
1912{
1913 struct restart_block *restart;
1914 struct hrtimer_sleeper t;
1915 int ret = 0;
1916 u64 slack;
1917
1918 slack = current->timer_slack_ns;
1919 if (dl_task(current) || rt_task(current))
1920 slack = 0;
1921
1922 hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1923 hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
1924 ret = do_nanosleep(&t, mode);
1925 if (ret != -ERESTART_RESTARTBLOCK)
1926 goto out;
1927
	/* Absolute timers do not update the rmtp value and restart: */
1929 if (mode == HRTIMER_MODE_ABS) {
1930 ret = -ERESTARTNOHAND;
1931 goto out;
1932 }
1933
	restart = &current->restart_block;
1935 restart->fn = hrtimer_nanosleep_restart;
1936 restart->nanosleep.clockid = t.timer.base->clockid;
1937 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1938out:
1939 destroy_hrtimer_on_stack(&t.timer);
1940 return ret;
1941}
1942
1943#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
1944
1945SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
1946 struct __kernel_timespec __user *, rmtp)
1947{
1948 struct timespec64 tu;
1949
1950 if (get_timespec64(&tu, rqtp))
1951 return -EFAULT;
1952
1953 if (!timespec64_valid(&tu))
1954 return -EINVAL;
1955
1956 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1957 current->restart_block.nanosleep.rmtp = rmtp;
1958 return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1959}
1960
1961#endif
1962
1963#ifdef CONFIG_COMPAT_32BIT_TIME
1964
1965SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
1966 struct old_timespec32 __user *, rmtp)
1967{
1968 struct timespec64 tu;
1969
1970 if (get_old_timespec32(&tu, rqtp))
1971 return -EFAULT;
1972
1973 if (!timespec64_valid(&tu))
1974 return -EINVAL;
1975
1976 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1977 current->restart_block.nanosleep.compat_rmtp = rmtp;
1978 return hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
1979}
1980#endif

/*
 * Functions related to boot-time initialization:
 */
1985int hrtimers_prepare_cpu(unsigned int cpu)
1986{
1987 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
1988 int i;
1989
1990 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1991 cpu_base->clock_base[i].cpu_base = cpu_base;
1992 timerqueue_init_head(&cpu_base->clock_base[i].active);
1993 }
1994
1995 cpu_base->cpu = cpu;
1996 cpu_base->active_bases = 0;
1997 cpu_base->hres_active = 0;
1998 cpu_base->hang_detected = 0;
1999 cpu_base->next_timer = NULL;
2000 cpu_base->softirq_next_timer = NULL;
2001 cpu_base->expires_next = KTIME_MAX;
2002 cpu_base->softirq_expires_next = KTIME_MAX;
2003 hrtimer_cpu_base_init_expiry_lock(cpu_base);
2004 return 0;
2005}
2006
2007#ifdef CONFIG_HOTPLUG_CPU
2008
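/*
 * Move all timers queued on @old_base of the dead CPU over to @new_base on
 * the current CPU.
 */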
2009static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
2010 struct hrtimer_clock_base *new_base)
2011{
2012 struct hrtimer *timer;
2013 struct timerqueue_node *node;
2014
2015 while ((node = timerqueue_getnext(&old_base->active))) {
2016 timer = container_of(node, struct hrtimer, node);
2017 BUG_ON(hrtimer_callback_running(timer));
2018 debug_deactivate(timer);

		/*
		 * Mark it as ENQUEUED not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
2025 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2026 timer->base = new_base;
2027
2028
2029
2030
2031
2032
2033
2034
2035 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2036 }
2037}
2038
2039int hrtimers_dead_cpu(unsigned int scpu)
2040{
2041 struct hrtimer_cpu_base *old_base, *new_base;
2042 int i;
2043
2044 BUG_ON(cpu_online(scpu));
2045 tick_cancel_sched_timer(scpu);
2046
2047
2048
2049
2050
2051
2052 local_bh_disable();
2053 local_irq_disable();
2054 old_base = &per_cpu(hrtimer_bases, scpu);
2055 new_base = this_cpu_ptr(&hrtimer_bases);
2056
2057
2058
2059
2060 raw_spin_lock(&new_base->lock);
2061 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2062
2063 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2064 migrate_hrtimer_list(&old_base->clock_base[i],
2065 &new_base->clock_base[i]);
2066 }
2067
2068
2069
2070
2071
2072 hrtimer_update_softirq_timer(new_base, false);
2073
2074 raw_spin_unlock(&old_base->lock);
2075 raw_spin_unlock(&new_base->lock);
2076
2077
2078 __hrtimer_peek_ahead_timers();
2079 local_irq_enable();
2080 local_bh_enable();
2081 return 0;
2082}
2083
2084#endif
2085
2086void __init hrtimers_init(void)
2087{
2088 hrtimers_prepare_cpu(smp_processor_id());
2089 open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
2090}
2091
/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 * @clock_id:	timer clock to be used
 */
2099int __sched
2100schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2101 const enum hrtimer_mode mode, clockid_t clock_id)
2102{
2103 struct hrtimer_sleeper t;
2104
2105
2106
2107
2108
2109 if (expires && *expires == 0) {
2110 __set_current_state(TASK_RUNNING);
2111 return 0;
2112 }
2113
2114
2115
2116
2117 if (!expires) {
2118 schedule();
2119 return -EINTR;
2120 }
2121
2122 hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2123 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2124 hrtimer_sleeper_start_expires(&t, mode);
2125
2126 if (likely(t.task))
2127 schedule();
2128
2129 hrtimer_cancel(&t.timer);
2130 destroy_hrtimer_on_stack(&t.timer);
2131
2132 __set_current_state(TASK_RUNNING);
2133
2134 return !t.task ? 0 : -EINTR;
2135}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, but no earlier than @expires.
 *
 * You can set the task state to %TASK_UNINTERRUPTIBLE or
 * %TASK_INTERRUPTIBLE before calling this; in the latter case the sleep
 * may be interrupted by a signal or an explicit wakeup. The task state
 * is TASK_RUNNING when this routine returns.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
2169int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
2170 const enum hrtimer_mode mode)
2171{
2172 return schedule_hrtimeout_range_clock(expires, delta, mode,
2173 CLOCK_MONOTONIC);
2174}
2175EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * See schedule_hrtimeout_range() for the detailed semantics; this is the
 * same call with a zero slack (@delta) value.
 *
 * Returns 0 when the timer has expired. If the task was woken before the
 * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
 * by an explicit wakeup, it returns -EINTR.
 */
2203int __sched schedule_hrtimeout(ktime_t *expires,
2204 const enum hrtimer_mode mode)
2205{
2206 return schedule_hrtimeout_range(expires, 0, mode);
2207}
2208EXPORT_SYMBOL_GPL(schedule_hrtimeout);
2209