// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/time/hrtimer.c - high-resolution kernel timers
 *
 * Per-CPU clock bases, timer enqueue/expiry handling and the
 * nanosleep/schedule_hrtimeout front ends.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched/signal.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/nohz.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <trace/events/timer.h>

#include "tick-internal.h"
60
61
62
63
64
#define MASK_SHIFT		(HRTIMER_BASE_MONOTONIC_SOFT)
#define HRTIMER_ACTIVE_HARD	((1U << MASK_SHIFT) - 1)
#define HRTIMER_ACTIVE_SOFT	(HRTIMER_ACTIVE_HARD << MASK_SHIFT)
#define HRTIMER_ACTIVE_ALL	(HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
69
70
71
72
73
74
75
76
77
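/*
 * The timer bases. There is one pair of bases (hard interrupt expiry and
 * softirq expiry) per clock: MONOTONIC, REALTIME, BOOTTIME and TAI.
 * cpu_base->lock protects all bases of one CPU. Timers can migrate between
 * CPUs, so timer->base is only stable while that lock is held.
 */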
78DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
79{
80 .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
81 .clock_base =
82 {
83 {
84 .index = HRTIMER_BASE_MONOTONIC,
85 .clockid = CLOCK_MONOTONIC,
86 .get_time = &ktime_get,
87 },
88 {
89 .index = HRTIMER_BASE_REALTIME,
90 .clockid = CLOCK_REALTIME,
91 .get_time = &ktime_get_real,
92 },
93 {
94 .index = HRTIMER_BASE_BOOTTIME,
95 .clockid = CLOCK_BOOTTIME,
96 .get_time = &ktime_get_boottime,
97 },
98 {
99 .index = HRTIMER_BASE_TAI,
100 .clockid = CLOCK_TAI,
101 .get_time = &ktime_get_clocktai,
102 },
103 {
104 .index = HRTIMER_BASE_MONOTONIC_SOFT,
105 .clockid = CLOCK_MONOTONIC,
106 .get_time = &ktime_get,
107 },
108 {
109 .index = HRTIMER_BASE_REALTIME_SOFT,
110 .clockid = CLOCK_REALTIME,
111 .get_time = &ktime_get_real,
112 },
113 {
114 .index = HRTIMER_BASE_BOOTTIME_SOFT,
115 .clockid = CLOCK_BOOTTIME,
116 .get_time = &ktime_get_boottime,
117 },
118 {
119 .index = HRTIMER_BASE_TAI_SOFT,
120 .clockid = CLOCK_TAI,
121 .get_time = &ktime_get_clocktai,
122 },
123 }
124};
125
126static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
127
128 [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
129
130 [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
131 [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
132 [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
133 [CLOCK_TAI] = HRTIMER_BASE_TAI,
134};
135
136
137
138
139
140#ifdef CONFIG_SMP
141
142
143
144
145
146
147static struct hrtimer_cpu_base migration_cpu_base = {
148 .clock_base = { {
149 .cpu_base = &migration_cpu_base,
150 .seq = SEQCNT_RAW_SPINLOCK_ZERO(migration_cpu_base.seq,
151 &migration_cpu_base.lock),
152 }, },
153};
154
155#define migration_base migration_cpu_base.clock_base[0]
156
157static inline bool is_migration_base(struct hrtimer_clock_base *base)
158{
159 return base == &migration_base;
160}
161
162
163
164
165
166
167
168
169
170
171
172
173
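/*
 * Lock the base a timer is currently enqueued on and return it with
 * cpu_base->lock held and interrupts disabled. Retries when the timer is
 * concurrently being migrated: during migration timer->base points to
 * &migration_base, and the base read before locking may no longer be the
 * timer's base once the lock has been acquired.
 */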
174static
175struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
176 unsigned long *flags)
177{
178 struct hrtimer_clock_base *base;
179
180 for (;;) {
181 base = READ_ONCE(timer->base);
182 if (likely(base != &migration_base)) {
183 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
184 if (likely(base == timer->base))
185 return base;
186
187 raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
188 }
189 cpu_relax();
190 }
191}
192
193
194
195
196
197
198
199
200
201
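/*
 * Return true when the timer would expire before the event already
 * programmed on @new_base's CPU. Used to avoid moving a timer to a remote
 * CPU whose clock event device cannot be reprogrammed from here, which
 * would make the timer fire late.
 */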
202static int
203hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
204{
205 ktime_t expires;
206
207 expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
208 return expires < new_base->cpu_base->expires_next;
209}
210
211static inline
212struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
213 int pinned)
214{
215#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
216 if (static_branch_likely(&timers_migration_enabled) && !pinned)
217 return &per_cpu(hrtimer_bases, get_nohz_timer_target());
218#endif
219 return base;
220}
221
222
223
224
225
226
227
228
229
230
231
232
233
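/*
 * Switch the timer to the clock base of the current CPU, or of the nohz
 * target CPU when timer migration is enabled and the timer is not pinned.
 * The timer stays on its current base while its callback is running, and
 * falls back to this CPU when the chosen remote target could not handle an
 * earlier-expiring timer (hrtimer_check_target()).
 */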
234static inline struct hrtimer_clock_base *
235switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
236 int pinned)
237{
238 struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
239 struct hrtimer_clock_base *new_base;
240 int basenum = base->index;
241
242 this_cpu_base = this_cpu_ptr(&hrtimer_bases);
243 new_cpu_base = get_target_base(this_cpu_base, pinned);
244again:
245 new_base = &new_cpu_base->clock_base[basenum];
246
247 if (base != new_base) {
248
249
250
251
252
253
254
255
256
257 if (unlikely(hrtimer_callback_running(timer)))
258 return base;
259
260
261 WRITE_ONCE(timer->base, &migration_base);
262 raw_spin_unlock(&base->cpu_base->lock);
263 raw_spin_lock(&new_base->cpu_base->lock);
264
265 if (new_cpu_base != this_cpu_base &&
266 hrtimer_check_target(timer, new_base)) {
267 raw_spin_unlock(&new_base->cpu_base->lock);
268 raw_spin_lock(&base->cpu_base->lock);
269 new_cpu_base = this_cpu_base;
270 WRITE_ONCE(timer->base, base);
271 goto again;
272 }
273 WRITE_ONCE(timer->base, new_base);
274 } else {
275 if (new_cpu_base != this_cpu_base &&
276 hrtimer_check_target(timer, new_base)) {
277 new_cpu_base = this_cpu_base;
278 goto again;
279 }
280 }
281 return new_base;
282}
283
284#else
285
286static inline bool is_migration_base(struct hrtimer_clock_base *base)
287{
288 return false;
289}
290
291static inline struct hrtimer_clock_base *
292lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
293{
294 struct hrtimer_clock_base *base = timer->base;
295
296 raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
297
298 return base;
299}
300
301# define switch_hrtimer_base(t, b, p) (b)
302
303#endif
304
305
306
307
308
309#if BITS_PER_LONG < 64
310
311
312
313s64 __ktime_divns(const ktime_t kt, s64 div)
314{
315 int sft = 0;
316 s64 dclc;
317 u64 tmp;
318
319 dclc = ktime_to_ns(kt);
320 tmp = dclc < 0 ? -dclc : dclc;
321
322
323 while (div >> 32) {
324 sft++;
325 div >>= 1;
326 }
327 tmp >>= sft;
328 do_div(tmp, (unsigned long) div);
329 return dclc < 0 ? -tmp : tmp;
330}
331EXPORT_SYMBOL_GPL(__ktime_divns);
332#endif
333
334
335
336
337ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
338{
339 ktime_t res = ktime_add_unsafe(lhs, rhs);
340
341
342
343
344
345 if (res < 0 || res < lhs || res < rhs)
346 res = ktime_set(KTIME_SEC_MAX, 0);
347
348 return res;
349}
350
351EXPORT_SYMBOL_GPL(ktime_add_safe);
352
353#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
354
355static struct debug_obj_descr hrtimer_debug_descr;
356
357static void *hrtimer_debug_hint(void *addr)
358{
359 return ((struct hrtimer *) addr)->function;
360}
361
362
363
364
365
366static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
367{
368 struct hrtimer *timer = addr;
369
370 switch (state) {
371 case ODEBUG_STATE_ACTIVE:
372 hrtimer_cancel(timer);
373 debug_object_init(timer, &hrtimer_debug_descr);
374 return true;
375 default:
376 return false;
377 }
378}
379
380
381
382
383
384
385static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
386{
387 switch (state) {
388 case ODEBUG_STATE_ACTIVE:
389 WARN_ON(1);
390
391 default:
392 return false;
393 }
394}
395
396
397
398
399
400static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
401{
402 struct hrtimer *timer = addr;
403
404 switch (state) {
405 case ODEBUG_STATE_ACTIVE:
406 hrtimer_cancel(timer);
407 debug_object_free(timer, &hrtimer_debug_descr);
408 return true;
409 default:
410 return false;
411 }
412}
413
414static struct debug_obj_descr hrtimer_debug_descr = {
415 .name = "hrtimer",
416 .debug_hint = hrtimer_debug_hint,
417 .fixup_init = hrtimer_fixup_init,
418 .fixup_activate = hrtimer_fixup_activate,
419 .fixup_free = hrtimer_fixup_free,
420};
421
422static inline void debug_hrtimer_init(struct hrtimer *timer)
423{
424 debug_object_init(timer, &hrtimer_debug_descr);
425}
426
427static inline void debug_hrtimer_activate(struct hrtimer *timer,
428 enum hrtimer_mode mode)
429{
430 debug_object_activate(timer, &hrtimer_debug_descr);
431}
432
433static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
434{
435 debug_object_deactivate(timer, &hrtimer_debug_descr);
436}
437
438static inline void debug_hrtimer_free(struct hrtimer *timer)
439{
440 debug_object_free(timer, &hrtimer_debug_descr);
441}
442
443static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
444 enum hrtimer_mode mode);
445
446void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
447 enum hrtimer_mode mode)
448{
449 debug_object_init_on_stack(timer, &hrtimer_debug_descr);
450 __hrtimer_init(timer, clock_id, mode);
451}
452EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
453
454static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
455 clockid_t clock_id, enum hrtimer_mode mode);
456
457void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
458 clockid_t clock_id, enum hrtimer_mode mode)
459{
460 debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
461 __hrtimer_init_sleeper(sl, clock_id, mode);
462}
463EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
464
465void destroy_hrtimer_on_stack(struct hrtimer *timer)
466{
467 debug_object_free(timer, &hrtimer_debug_descr);
468}
469EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
470
471#else
472
473static inline void debug_hrtimer_init(struct hrtimer *timer) { }
474static inline void debug_hrtimer_activate(struct hrtimer *timer,
475 enum hrtimer_mode mode) { }
476static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
477#endif
478
479static inline void
480debug_init(struct hrtimer *timer, clockid_t clockid,
481 enum hrtimer_mode mode)
482{
483 debug_hrtimer_init(timer);
484 trace_hrtimer_init(timer, clockid, mode);
485}
486
487static inline void debug_activate(struct hrtimer *timer,
488 enum hrtimer_mode mode)
489{
490 debug_hrtimer_activate(timer, mode);
491 trace_hrtimer_start(timer, mode);
492}
493
494static inline void debug_deactivate(struct hrtimer *timer)
495{
496 debug_hrtimer_deactivate(timer);
497 trace_hrtimer_cancel(timer);
498}
499
500static struct hrtimer_clock_base *
501__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
502{
503 unsigned int idx;
504
505 if (!*active)
506 return NULL;
507
508 idx = __ffs(*active);
509 *active &= ~(1U << idx);
510
511 return &cpu_base->clock_base[idx];
512}
513
514#define for_each_active_base(base, cpu_base, active) \
515 while ((base = __next_base((cpu_base), &(active))))
516
517static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
518 const struct hrtimer *exclude,
519 unsigned int active,
520 ktime_t expires_next)
521{
522 struct hrtimer_clock_base *base;
523 ktime_t expires;
524
525 for_each_active_base(base, cpu_base, active) {
526 struct timerqueue_node *next;
527 struct hrtimer *timer;
528
529 next = timerqueue_getnext(&base->active);
530 timer = container_of(next, struct hrtimer, node);
531 if (timer == exclude) {
532
533 next = timerqueue_iterate_next(next);
534 if (!next)
535 continue;
536
537 timer = container_of(next, struct hrtimer, node);
538 }
539 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
540 if (expires < expires_next) {
541 expires_next = expires;
542
543
544 if (exclude)
545 continue;
546
547 if (timer->is_soft)
548 cpu_base->softirq_next_timer = timer;
549 else
550 cpu_base->next_timer = timer;
551 }
552 }
553
554
555
556
557
558 if (expires_next < 0)
559 expires_next = 0;
560 return expires_next;
561}
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
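/*
 * Find the earliest expiry over the clock bases selected by @active_mask.
 * Soft bases are only scanned while the timer softirq is not pending; the
 * first expiring timer of each kind is cached in
 * cpu_base->softirq_next_timer and cpu_base->next_timer respectively.
 */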
580static ktime_t
581__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
582{
583 unsigned int active;
584 struct hrtimer *next_timer = NULL;
585 ktime_t expires_next = KTIME_MAX;
586
587 if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
588 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
589 cpu_base->softirq_next_timer = NULL;
590 expires_next = __hrtimer_next_event_base(cpu_base, NULL,
591 active, KTIME_MAX);
592
593 next_timer = cpu_base->softirq_next_timer;
594 }
595
596 if (active_mask & HRTIMER_ACTIVE_HARD) {
597 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
598 cpu_base->next_timer = next_timer;
599 expires_next = __hrtimer_next_event_base(cpu_base, NULL, active,
600 expires_next);
601 }
602
603 return expires_next;
604}
605
606static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
607{
608 ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
609 ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
610 ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
611
612 ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
613 offs_real, offs_boot, offs_tai);
614
615 base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
616 base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
617 base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
618
619 return now;
620}
621
622
623
624
625static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
626{
627 return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
628 cpu_base->hres_active : 0;
629}
630
631static inline int hrtimer_hres_active(void)
632{
633 return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
634}
635
636
637
638
639
640
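/*
 * Reprogram the clock event device after the first expiring timer has
 * changed (e.g. removal or migration of the leftmost timer). When the
 * first expiring timer sits on a softirq base, its expiry also becomes the
 * new softirq deadline, unless the softirq is already pending, in which
 * case only the first expiring hard timer is programmed.
 */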
641static void
642hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
643{
644 ktime_t expires_next;
645
646
647
648
649 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
650
651 if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
652
653
654
655
656
657 if (cpu_base->softirq_activated)
658 expires_next = __hrtimer_get_next_event(cpu_base,
659 HRTIMER_ACTIVE_HARD);
660 else
661 cpu_base->softirq_expires_next = expires_next;
662 }
663
664 if (skip_equal && expires_next == cpu_base->expires_next)
665 return;
666
667 cpu_base->expires_next = expires_next;
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
687 return;
688
689 tick_program_event(cpu_base->expires_next, 1);
690}
691
692
693#ifdef CONFIG_HIGH_RES_TIMERS
694
695
696
697
698static bool hrtimer_hres_enabled __read_mostly = true;
699unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
700EXPORT_SYMBOL_GPL(hrtimer_resolution);
701
702
703
704
705static int __init setup_hrtimer_hres(char *str)
706{
707 return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
708}
709
710__setup("highres=", setup_hrtimer_hres);
711
712
713
714
715static inline int hrtimer_is_hres_enabled(void)
716{
717 return hrtimer_hres_enabled;
718}
719
720
721
722
723
724
725static void retrigger_next_event(void *arg)
726{
727 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
728
729 if (!__hrtimer_hres_active(base))
730 return;
731
732 raw_spin_lock(&base->lock);
733 hrtimer_update_base(base);
734 hrtimer_force_reprogram(base, 0);
735 raw_spin_unlock(&base->lock);
736}
737
738
739
740
741static void hrtimer_switch_to_hres(void)
742{
743 struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
744
745 if (tick_init_highres()) {
746 printk(KERN_WARNING "Could not switch to high resolution "
747 "mode on CPU %d\n", base->cpu);
748 return;
749 }
750 base->hres_active = 1;
751 hrtimer_resolution = HIGH_RES_NSEC;
752
753 tick_setup_sched_timer();
754
755 retrigger_next_event(NULL);
756}
757
758static void clock_was_set_work(struct work_struct *work)
759{
760 clock_was_set();
761}
762
763static DECLARE_WORK(hrtimer_work, clock_was_set_work);
764
765
766
767
768
769void clock_was_set_delayed(void)
770{
771 schedule_work(&hrtimer_work);
772}
773
774#else
775
776static inline int hrtimer_is_hres_enabled(void) { return 0; }
777static inline void hrtimer_switch_to_hres(void) { }
778static inline void retrigger_next_event(void *arg) { }
779
780#endif
781
782
783
784
785
786
787
788
789static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
790{
791 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
792 struct hrtimer_clock_base *base = timer->base;
793 ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
794
795 WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
796
797
798
799
800
801 if (expires < 0)
802 expires = 0;
803
804 if (timer->is_soft) {
805
806
807
808
809
810
811
812 struct hrtimer_cpu_base *timer_cpu_base = base->cpu_base;
813
814 if (timer_cpu_base->softirq_activated)
815 return;
816
817 if (!ktime_before(expires, timer_cpu_base->softirq_expires_next))
818 return;
819
820 timer_cpu_base->softirq_next_timer = timer;
821 timer_cpu_base->softirq_expires_next = expires;
822
823 if (!ktime_before(expires, timer_cpu_base->expires_next) ||
824 !reprogram)
825 return;
826 }
827
828
829
830
831
832 if (base->cpu_base != cpu_base)
833 return;
834
835
836
837
838
839
840
841
842 if (cpu_base->in_hrtirq)
843 return;
844
845 if (expires >= cpu_base->expires_next)
846 return;
847
848
849 cpu_base->next_timer = timer;
850 cpu_base->expires_next = expires;
851
852
853
854
855
856
857
858
859
860
861 if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
862 return;
863
864
865
866
867
868 tick_program_event(expires, 1);
869}
870
871
872
873
874
875
876
877
878
879
880
881
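/*
 * clock_was_set - the timekeeping offsets were changed
 *
 * In high resolution mode, retrigger the next event on every CPU so the
 * per-CPU base offsets (realtime, boottime, tai) are refreshed and the
 * clock event device is reprogrammed; also notifies timerfd.
 */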
882void clock_was_set(void)
883{
884#ifdef CONFIG_HIGH_RES_TIMERS
885
886 on_each_cpu(retrigger_next_event, NULL, 1);
887#endif
888 timerfd_clock_was_set();
889}
890
891
892
893
894
895
896
897void hrtimers_resume(void)
898{
899 lockdep_assert_irqs_disabled();
900
901 retrigger_next_event(NULL);
902
903 clock_was_set_delayed();
904}
905
906
907
908
909static inline
910void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
911{
912 raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
913}
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
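/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future past @now, in
 * whole multiples of @interval (clamped to the timer resolution). Must not
 * be called on an enqueued timer.
 *
 * Returns the number of overruns.
 */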
932u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
933{
934 u64 orun = 1;
935 ktime_t delta;
936
937 delta = ktime_sub(now, hrtimer_get_expires(timer));
938
939 if (delta < 0)
940 return 0;
941
942 if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
943 return 0;
944
945 if (interval < hrtimer_resolution)
946 interval = hrtimer_resolution;
947
948 if (unlikely(delta >= interval)) {
949 s64 incr = ktime_to_ns(interval);
950
951 orun = ktime_divns(delta, incr);
952 hrtimer_add_expires_ns(timer, incr * orun);
953 if (hrtimer_get_expires_tv64(timer) > now)
954 return orun;
955
956
957
958
959 orun++;
960 }
961 hrtimer_add_expires(timer, interval);
962
963 return orun;
964}
965EXPORT_SYMBOL_GPL(hrtimer_forward);
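/*
 * Illustrative sketch (not part of this file): a periodic timer is
 * normally implemented by forwarding it from its own callback and
 * returning HRTIMER_RESTART. The callback name and the 10ms period are
 * made up for the example.
 *
 *	static enum hrtimer_restart my_tick(struct hrtimer *t)
 *	{
 *		hrtimer_forward_now(t, ms_to_ktime(10));
 *		return HRTIMER_RESTART;
 *	}
 */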
966
967
968
969
970
971
972
973
974
975static int enqueue_hrtimer(struct hrtimer *timer,
976 struct hrtimer_clock_base *base,
977 enum hrtimer_mode mode)
978{
979 debug_activate(timer, mode);
980
981 base->cpu_base->active_bases |= 1 << base->index;
982
983 timer->state = HRTIMER_STATE_ENQUEUED;
984
985 return timerqueue_add(&base->active, &timer->node);
986}
987
988
989
990
991
992
993
994
995
996
997
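/*
 * __remove_hrtimer - remove a timer from the timerqueue of its base
 *
 * Sets timer->state to @newstate, removes the timer and clears the base's
 * bit in cpu_base->active_bases when the queue became empty. When
 * @reprogram is set and the removed timer was the first expiring one, the
 * clock event device is reprogrammed for the new first timer.
 */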
998static void __remove_hrtimer(struct hrtimer *timer,
999 struct hrtimer_clock_base *base,
1000 u8 newstate, int reprogram)
1001{
1002 struct hrtimer_cpu_base *cpu_base = base->cpu_base;
1003 u8 state = timer->state;
1004
1005 timer->state = newstate;
1006 if (!(state & HRTIMER_STATE_ENQUEUED))
1007 return;
1008
1009 if (!timerqueue_del(&base->active, &timer->node))
1010 cpu_base->active_bases &= ~(1 << base->index);
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020 if (reprogram && timer == cpu_base->next_timer)
1021 hrtimer_force_reprogram(cpu_base, 1);
1022}
1023
1024
1025
1026
1027static inline int
1028remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
1029{
1030 if (hrtimer_is_queued(timer)) {
1031 u8 state = timer->state;
1032 int reprogram;
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042 debug_deactivate(timer);
1043 reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
1044
1045 if (!restart)
1046 state = HRTIMER_STATE_INACTIVE;
1047
1048 __remove_hrtimer(timer, base, state, reprogram);
1049 return 1;
1050 }
1051 return 0;
1052}
1053
1054static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
1055 const enum hrtimer_mode mode)
1056{
1057#ifdef CONFIG_TIME_LOW_RES
1058
1059
1060
1061
1062
1063 timer->is_rel = mode & HRTIMER_MODE_REL;
1064 if (timer->is_rel)
1065 tim = ktime_add_safe(tim, hrtimer_resolution);
1066#endif
1067 return tim;
1068}
1069
1070static void
1071hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
1072{
1073 ktime_t expires;
1074
1075
1076
1077
1078 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
1079
1080
1081
1082
1083
1084
1085 if (expires == KTIME_MAX)
1086 return;
1087
1088
1089
1090
1091
1092 hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
1093}
1094
1095static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1096 u64 delta_ns, const enum hrtimer_mode mode,
1097 struct hrtimer_clock_base *base)
1098{
1099 struct hrtimer_clock_base *new_base;
1100
1101
1102 remove_hrtimer(timer, base, true);
1103
1104 if (mode & HRTIMER_MODE_REL)
1105 tim = ktime_add_safe(tim, base->get_time());
1106
1107 tim = hrtimer_update_lowres(timer, tim, mode);
1108
1109 hrtimer_set_expires_range_ns(timer, tim, delta_ns);
1110
1111
1112 new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
1113
1114 return enqueue_hrtimer(timer, new_base, mode);
1115}
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
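/**
 * hrtimer_start_range_ns - (re)start an hrtimer
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	timer mode: absolute or relative, pinned or not, soft or
 *		hard expiry context
 */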
1126void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
1127 u64 delta_ns, const enum hrtimer_mode mode)
1128{
1129 struct hrtimer_clock_base *base;
1130 unsigned long flags;
1131
1132
1133
1134
1135
1136
1137 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1138 WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
1139 else
1140 WARN_ON_ONCE(!(mode & HRTIMER_MODE_HARD) ^ !timer->is_hard);
1141
1142 base = lock_hrtimer_base(timer, &flags);
1143
1144 if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
1145 hrtimer_reprogram(timer, true);
1146
1147 unlock_hrtimer_base(timer, &flags);
1148}
1149EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
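/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */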
1161int hrtimer_try_to_cancel(struct hrtimer *timer)
1162{
1163 struct hrtimer_clock_base *base;
1164 unsigned long flags;
1165 int ret = -1;
1166
1167
1168
1169
1170
1171
1172
1173 if (!hrtimer_active(timer))
1174 return 0;
1175
1176 base = lock_hrtimer_base(timer, &flags);
1177
1178 if (!hrtimer_callback_running(timer))
1179 ret = remove_hrtimer(timer, base, false);
1180
1181 unlock_hrtimer_base(timer, &flags);
1182
1183 return ret;
1184
1185}
1186EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
1187
1188#ifdef CONFIG_PREEMPT_RT
1189static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
1190{
1191 spin_lock_init(&base->softirq_expiry_lock);
1192}
1193
1194static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
1195{
1196 spin_lock(&base->softirq_expiry_lock);
1197}
1198
1199static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
1200{
1201 spin_unlock(&base->softirq_expiry_lock);
1202}
1203
1204
1205
1206
1207
1208
1209
1210
1211static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
1212 unsigned long flags)
1213{
1214 if (atomic_read(&cpu_base->timer_waiters)) {
1215 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1216 spin_unlock(&cpu_base->softirq_expiry_lock);
1217 spin_lock(&cpu_base->softirq_expiry_lock);
1218 raw_spin_lock_irq(&cpu_base->lock);
1219 }
1220}
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238void hrtimer_cancel_wait_running(const struct hrtimer *timer)
1239{
1240
1241 struct hrtimer_clock_base *base = READ_ONCE(timer->base);
1242
1243
1244
1245
1246
1247 if (!timer->is_soft || is_migration_base(base)) {
1248 cpu_relax();
1249 return;
1250 }
1251
1252
1253
1254
1255
1256
1257
1258
1259 atomic_inc(&base->cpu_base->timer_waiters);
1260 spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
1261 atomic_dec(&base->cpu_base->timer_waiters);
1262 spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
1263}
1264#else
1265static inline void
1266hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
1267static inline void
1268hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
1269static inline void
1270hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
1271static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
1272 unsigned long flags) { }
1273#endif
1274
1275
1276
1277
1278
1279
1280
1281
1282
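/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */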
1283int hrtimer_cancel(struct hrtimer *timer)
1284{
1285 int ret;
1286
1287 do {
1288 ret = hrtimer_try_to_cancel(timer);
1289
1290 if (ret < 0)
1291 hrtimer_cancel_wait_running(timer);
1292 } while (ret < 0);
1293 return ret;
1294}
1295EXPORT_SYMBOL_GPL(hrtimer_cancel);
1296
1297
1298
1299
1300
1301
1302ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
1303{
1304 unsigned long flags;
1305 ktime_t rem;
1306
1307 lock_hrtimer_base(timer, &flags);
1308 if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
1309 rem = hrtimer_expires_remaining_adjusted(timer);
1310 else
1311 rem = hrtimer_expires_remaining(timer);
1312 unlock_hrtimer_base(timer, &flags);
1313
1314 return rem;
1315}
1316EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
1317
1318#ifdef CONFIG_NO_HZ_COMMON
1319
1320
1321
1322
1323
1324u64 hrtimer_get_next_event(void)
1325{
1326 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1327 u64 expires = KTIME_MAX;
1328 unsigned long flags;
1329
1330 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1331
1332 if (!__hrtimer_hres_active(cpu_base))
1333 expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1334
1335 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1336
1337 return expires;
1338}
1339
1340
1341
1342
1343
1344
1345
1346
1347u64 hrtimer_next_event_without(const struct hrtimer *exclude)
1348{
1349 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1350 u64 expires = KTIME_MAX;
1351 unsigned long flags;
1352
1353 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1354
1355 if (__hrtimer_hres_active(cpu_base)) {
1356 unsigned int active;
1357
1358 if (!cpu_base->softirq_activated) {
1359 active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
1360 expires = __hrtimer_next_event_base(cpu_base, exclude,
1361 active, KTIME_MAX);
1362 }
1363 active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
1364 expires = __hrtimer_next_event_base(cpu_base, exclude, active,
1365 expires);
1366 }
1367
1368 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1369
1370 return expires;
1371}
1372#endif
1373
1374static inline int hrtimer_clockid_to_base(clockid_t clock_id)
1375{
1376 if (likely(clock_id < MAX_CLOCKS)) {
1377 int base = hrtimer_clock_to_base_table[clock_id];
1378
1379 if (likely(base != HRTIMER_MAX_CLOCK_BASES))
1380 return base;
1381 }
1382 WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
1383 return HRTIMER_BASE_MONOTONIC;
1384}
1385
1386static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1387 enum hrtimer_mode mode)
1388{
1389 bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
1390 struct hrtimer_cpu_base *cpu_base;
1391 int base;
1392
1393
1394
1395
1396
1397
1398
1399 if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(mode & HRTIMER_MODE_HARD))
1400 softtimer = true;
1401
1402 memset(timer, 0, sizeof(struct hrtimer));
1403
1404 cpu_base = raw_cpu_ptr(&hrtimer_bases);
1405
1406
1407
1408
1409
1410
1411 if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
1412 clock_id = CLOCK_MONOTONIC;
1413
1414 base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
1415 base += hrtimer_clockid_to_base(clock_id);
1416 timer->is_soft = softtimer;
1417 timer->is_hard = !!(mode & HRTIMER_MODE_HARD);
1418 timer->base = &cpu_base->clock_base[base];
1419 timerqueue_init(&timer->node);
1420}
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
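/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	the timer mode (abs/rel, soft/hard expiry context)
 */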
1434void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
1435 enum hrtimer_mode mode)
1436{
1437 debug_init(timer, clock_id, mode);
1438 __hrtimer_init(timer, clock_id, mode);
1439}
1440EXPORT_SYMBOL_GPL(hrtimer_init);
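/*
 * Illustrative sketch (not part of this file) of arming a one-shot timer;
 * my_timer and my_timeout are made-up names:
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_timeout;
 *	hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
 */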
1441
1442
1443
1444
1445
1446
1447
1448
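/**
 * hrtimer_active - check whether a timer is enqueued or its callback
 *		    is running
 * @timer:	the timer to check
 *
 * Lockless: the timer can migrate between bases concurrently, so the state
 * and the running callback are sampled under base->seq; a negative result
 * is only returned once the sequence count and the base pointer are stable.
 */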
1449bool hrtimer_active(const struct hrtimer *timer)
1450{
1451 struct hrtimer_clock_base *base;
1452 unsigned int seq;
1453
1454 do {
1455 base = READ_ONCE(timer->base);
1456 seq = raw_read_seqcount_begin(&base->seq);
1457
1458 if (timer->state != HRTIMER_STATE_INACTIVE ||
1459 base->running == timer)
1460 return true;
1461
1462 } while (read_seqcount_retry(&base->seq, seq) ||
1463 base != READ_ONCE(timer->base));
1464
1465 return false;
1466}
1467EXPORT_SYMBOL_GPL(hrtimer_active);
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
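/*
 * __run_hrtimer - run the callback of an expired timer
 *
 * Called with cpu_base->lock held and interrupts disabled; the lock is
 * dropped around the callback invocation. base->running and timer->state
 * are updated on either side of a seqcount write barrier so that
 * hrtimer_active() never observes the timer as inactive while the callback
 * runs. A callback which returns HRTIMER_RESTART (and did not requeue the
 * timer itself) is reenqueued in absolute mode.
 */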
1487static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
1488 struct hrtimer_clock_base *base,
1489 struct hrtimer *timer, ktime_t *now,
1490 unsigned long flags) __must_hold(&cpu_base->lock)
1491{
1492 enum hrtimer_restart (*fn)(struct hrtimer *);
1493 bool expires_in_hardirq;
1494 int restart;
1495
1496 lockdep_assert_held(&cpu_base->lock);
1497
1498 debug_deactivate(timer);
1499 base->running = timer;
1500
1501
1502
1503
1504
1505
1506
1507
1508 raw_write_seqcount_barrier(&base->seq);
1509
1510 __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
1511 fn = timer->function;
1512
1513
1514
1515
1516
1517
1518 if (IS_ENABLED(CONFIG_TIME_LOW_RES))
1519 timer->is_rel = false;
1520
1521
1522
1523
1524
1525
1526 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1527 trace_hrtimer_expire_entry(timer, now);
1528 expires_in_hardirq = lockdep_hrtimer_enter(timer);
1529
1530 restart = fn(timer);
1531
1532 lockdep_hrtimer_exit(expires_in_hardirq);
1533 trace_hrtimer_expire_exit(timer);
1534 raw_spin_lock_irq(&cpu_base->lock);
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545 if (restart != HRTIMER_NORESTART &&
1546 !(timer->state & HRTIMER_STATE_ENQUEUED))
1547 enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
1548
1549
1550
1551
1552
1553
1554
1555
1556 raw_write_seqcount_barrier(&base->seq);
1557
1558 WARN_ON_ONCE(base->running != timer);
1559 base->running = NULL;
1560}
1561
1562static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
1563 unsigned long flags, unsigned int active_mask)
1564{
1565 struct hrtimer_clock_base *base;
1566 unsigned int active = cpu_base->active_bases & active_mask;
1567
1568 for_each_active_base(base, cpu_base, active) {
1569 struct timerqueue_node *node;
1570 ktime_t basenow;
1571
1572 basenow = ktime_add(now, base->offset);
1573
1574 while ((node = timerqueue_getnext(&base->active))) {
1575 struct hrtimer *timer;
1576
1577 timer = container_of(node, struct hrtimer, node);
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591 if (basenow < hrtimer_get_softexpires_tv64(timer))
1592 break;
1593
1594 __run_hrtimer(cpu_base, base, timer, &basenow, flags);
1595 if (active_mask == HRTIMER_ACTIVE_SOFT)
1596 hrtimer_sync_wait_running(cpu_base, flags);
1597 }
1598 }
1599}
1600
1601static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
1602{
1603 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1604 unsigned long flags;
1605 ktime_t now;
1606
1607 hrtimer_cpu_base_lock_expiry(cpu_base);
1608 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1609
1610 now = hrtimer_update_base(cpu_base);
1611 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
1612
1613 cpu_base->softirq_activated = 0;
1614 hrtimer_update_softirq_timer(cpu_base, true);
1615
1616 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1617 hrtimer_cpu_base_unlock_expiry(cpu_base);
1618}
1619
1620#ifdef CONFIG_HIGH_RES_TIMERS
1621
1622
1623
1624
1625
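/*
 * hrtimer_interrupt - high resolution timer interrupt handler
 *
 * Expires all due hard timers, raises HRTIMER_SOFTIRQ when soft timers are
 * due, and reprograms the clock event device for the next event. When
 * reprogramming keeps failing because the expiry is already in the past,
 * retries a few times and then enters hang avoidance: program a defensive
 * delay and warn once.
 */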
1626void hrtimer_interrupt(struct clock_event_device *dev)
1627{
1628 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1629 ktime_t expires_next, now, entry_time, delta;
1630 unsigned long flags;
1631 int retries = 0;
1632
1633 BUG_ON(!cpu_base->hres_active);
1634 cpu_base->nr_events++;
1635 dev->next_event = KTIME_MAX;
1636
1637 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1638 entry_time = now = hrtimer_update_base(cpu_base);
1639retry:
1640 cpu_base->in_hrtirq = 1;
1641
1642
1643
1644
1645
1646
1647
1648 cpu_base->expires_next = KTIME_MAX;
1649
1650 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1651 cpu_base->softirq_expires_next = KTIME_MAX;
1652 cpu_base->softirq_activated = 1;
1653 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1654 }
1655
1656 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1657
1658
1659 expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
1660
1661
1662
1663
1664 cpu_base->expires_next = expires_next;
1665 cpu_base->in_hrtirq = 0;
1666 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1667
1668
1669 if (!tick_program_event(expires_next, 0)) {
1670 cpu_base->hang_detected = 0;
1671 return;
1672 }
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1688 now = hrtimer_update_base(cpu_base);
1689 cpu_base->nr_retries++;
1690 if (++retries < 3)
1691 goto retry;
1692
1693
1694
1695
1696
1697
1698 cpu_base->nr_hangs++;
1699 cpu_base->hang_detected = 1;
1700 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1701
1702 delta = ktime_sub(now, entry_time);
1703 if ((unsigned int)delta > cpu_base->max_hang_time)
1704 cpu_base->max_hang_time = (unsigned int) delta;
1705
1706
1707
1708
1709 if (delta > 100 * NSEC_PER_MSEC)
1710 expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
1711 else
1712 expires_next = ktime_add(now, delta);
1713 tick_program_event(expires_next, 1);
1714 printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
1715 ktime_to_ns(delta));
1716}
1717
1718
1719static inline void __hrtimer_peek_ahead_timers(void)
1720{
1721 struct tick_device *td;
1722
1723 if (!hrtimer_hres_active())
1724 return;
1725
1726 td = this_cpu_ptr(&tick_cpu_device);
1727 if (td && td->evtdev)
1728 hrtimer_interrupt(td->evtdev);
1729}
1730
1731#else
1732
1733static inline void __hrtimer_peek_ahead_timers(void) { }
1734
1735#endif
1736
1737
1738
1739
1740void hrtimer_run_queues(void)
1741{
1742 struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
1743 unsigned long flags;
1744 ktime_t now;
1745
1746 if (__hrtimer_hres_active(cpu_base))
1747 return;
1748
1749
1750
1751
1752
1753
1754
1755
1756 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
1757 hrtimer_switch_to_hres();
1758 return;
1759 }
1760
1761 raw_spin_lock_irqsave(&cpu_base->lock, flags);
1762 now = hrtimer_update_base(cpu_base);
1763
1764 if (!ktime_before(now, cpu_base->softirq_expires_next)) {
1765 cpu_base->softirq_expires_next = KTIME_MAX;
1766 cpu_base->softirq_activated = 1;
1767 raise_softirq_irqoff(HRTIMER_SOFTIRQ);
1768 }
1769
1770 __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
1771 raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
1772}
1773
1774
1775
1776
1777static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
1778{
1779 struct hrtimer_sleeper *t =
1780 container_of(timer, struct hrtimer_sleeper, timer);
1781 struct task_struct *task = t->task;
1782
1783 t->task = NULL;
1784 if (task)
1785 wake_up_process(task);
1786
1787 return HRTIMER_NORESTART;
1788}
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798void hrtimer_sleeper_start_expires(struct hrtimer_sleeper *sl,
1799 enum hrtimer_mode mode)
1800{
1801
1802
1803
1804
1805
1806
1807
1808 if (IS_ENABLED(CONFIG_PREEMPT_RT) && sl->timer.is_hard)
1809 mode |= HRTIMER_MODE_HARD;
1810
1811 hrtimer_start_expires(&sl->timer, mode);
1812}
1813EXPORT_SYMBOL_GPL(hrtimer_sleeper_start_expires);
1814
1815static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
1816 clockid_t clock_id, enum hrtimer_mode mode)
1817{
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1838 if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT))
1839 mode |= HRTIMER_MODE_HARD;
1840 }
1841
1842 __hrtimer_init(&sl->timer, clock_id, mode);
1843 sl->timer.function = hrtimer_wakeup;
1844 sl->task = current;
1845}
1846
1847
1848
1849
1850
1851
1852
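/**
 * hrtimer_init_sleeper - initialize sleeper to the given clock
 * @sl:		sleeper to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */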
1853void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
1854 enum hrtimer_mode mode)
1855{
1856 debug_init(&sl->timer, clock_id, mode);
1857 __hrtimer_init_sleeper(sl, clock_id, mode);
1858
1859}
1860EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
1861
1862int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
1863{
1864 switch(restart->nanosleep.type) {
1865#ifdef CONFIG_COMPAT_32BIT_TIME
1866 case TT_COMPAT:
1867 if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
1868 return -EFAULT;
1869 break;
1870#endif
1871 case TT_NATIVE:
1872 if (put_timespec64(ts, restart->nanosleep.rmtp))
1873 return -EFAULT;
1874 break;
1875 default:
1876 BUG();
1877 }
1878 return -ERESTART_RESTARTBLOCK;
1879}
1880
1881static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
1882{
1883 struct restart_block *restart;
1884
1885 do {
1886 set_current_state(TASK_INTERRUPTIBLE);
1887 hrtimer_sleeper_start_expires(t, mode);
1888
1889 if (likely(t->task))
1890 freezable_schedule();
1891
1892 hrtimer_cancel(&t->timer);
1893 mode = HRTIMER_MODE_ABS;
1894
1895 } while (t->task && !signal_pending(current));
1896
1897 __set_current_state(TASK_RUNNING);
1898
1899 if (!t->task)
1900 return 0;
1901
	restart = &current->restart_block;
1903 if (restart->nanosleep.type != TT_NONE) {
1904 ktime_t rem = hrtimer_expires_remaining(&t->timer);
1905 struct timespec64 rmt;
1906
1907 if (rem <= 0)
1908 return 0;
1909 rmt = ktime_to_timespec64(rem);
1910
1911 return nanosleep_copyout(restart, &rmt);
1912 }
1913 return -ERESTART_RESTARTBLOCK;
1914}
1915
1916static long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
1917{
1918 struct hrtimer_sleeper t;
1919 int ret;
1920
1921 hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
1922 HRTIMER_MODE_ABS);
1923 hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
1924 ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
1925 destroy_hrtimer_on_stack(&t.timer);
1926 return ret;
1927}
1928
1929long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
1930 const clockid_t clockid)
1931{
1932 struct restart_block *restart;
1933 struct hrtimer_sleeper t;
1934 int ret = 0;
1935 u64 slack;
1936
1937 slack = current->timer_slack_ns;
1938 if (dl_task(current) || rt_task(current))
1939 slack = 0;
1940
1941 hrtimer_init_sleeper_on_stack(&t, clockid, mode);
1942 hrtimer_set_expires_range_ns(&t.timer, rqtp, slack);
1943 ret = do_nanosleep(&t, mode);
1944 if (ret != -ERESTART_RESTARTBLOCK)
1945 goto out;
1946
1947
1948 if (mode == HRTIMER_MODE_ABS) {
1949 ret = -ERESTARTNOHAND;
1950 goto out;
1951 }
1952
	restart = &current->restart_block;
1954 restart->fn = hrtimer_nanosleep_restart;
1955 restart->nanosleep.clockid = t.timer.base->clockid;
1956 restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
1957out:
1958 destroy_hrtimer_on_stack(&t.timer);
1959 return ret;
1960}
1961
1962#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
1963
1964SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
1965 struct __kernel_timespec __user *, rmtp)
1966{
1967 struct timespec64 tu;
1968
1969 if (get_timespec64(&tu, rqtp))
1970 return -EFAULT;
1971
1972 if (!timespec64_valid(&tu))
1973 return -EINVAL;
1974
1975 current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1976 current->restart_block.nanosleep.rmtp = rmtp;
1977 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1978 CLOCK_MONOTONIC);
1979}
1980
1981#endif
1982
1983#ifdef CONFIG_COMPAT_32BIT_TIME
1984
1985COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
1986 struct compat_timespec __user *, rmtp)
1987{
1988 struct timespec64 tu;
1989
1990 if (compat_get_timespec64(&tu, rqtp))
1991 return -EFAULT;
1992
1993 if (!timespec64_valid(&tu))
1994 return -EINVAL;
1995
1996 current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1997 current->restart_block.nanosleep.compat_rmtp = rmtp;
1998 return hrtimer_nanosleep(timespec64_to_ktime(tu), HRTIMER_MODE_REL,
1999 CLOCK_MONOTONIC);
2000}
2001#endif
2002
2003
2004
2005
2006int hrtimers_prepare_cpu(unsigned int cpu)
2007{
2008 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
2009 int i;
2010
2011 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2012 struct hrtimer_clock_base *clock_b = &cpu_base->clock_base[i];
2013
2014 clock_b->cpu_base = cpu_base;
2015 seqcount_raw_spinlock_init(&clock_b->seq, &cpu_base->lock);
2016 timerqueue_init_head(&clock_b->active);
2017 }
2018
2019 cpu_base->cpu = cpu;
2020 cpu_base->active_bases = 0;
2021 cpu_base->hres_active = 0;
2022 cpu_base->hang_detected = 0;
2023 cpu_base->next_timer = NULL;
2024 cpu_base->softirq_next_timer = NULL;
2025 cpu_base->expires_next = KTIME_MAX;
2026 cpu_base->softirq_expires_next = KTIME_MAX;
2027 hrtimer_cpu_base_init_expiry_lock(cpu_base);
2028 return 0;
2029}
2030
2031#ifdef CONFIG_HOTPLUG_CPU
2032
2033static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
2034 struct hrtimer_clock_base *new_base)
2035{
2036 struct hrtimer *timer;
2037 struct timerqueue_node *node;
2038
2039 while ((node = timerqueue_getnext(&old_base->active))) {
2040 timer = container_of(node, struct hrtimer, node);
2041 BUG_ON(hrtimer_callback_running(timer));
2042 debug_deactivate(timer);
2043
2044
2045
2046
2047
2048
2049 __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
2050 timer->base = new_base;
2051
2052
2053
2054
2055
2056
2057
2058
2059 enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
2060 }
2061}
2062
2063int hrtimers_dead_cpu(unsigned int scpu)
2064{
2065 struct hrtimer_cpu_base *old_base, *new_base;
2066 int i;
2067
2068 BUG_ON(cpu_online(scpu));
2069 tick_cancel_sched_timer(scpu);
2070
2071
2072
2073
2074
2075
2076 local_bh_disable();
2077 local_irq_disable();
2078 old_base = &per_cpu(hrtimer_bases, scpu);
2079 new_base = this_cpu_ptr(&hrtimer_bases);
2080
2081
2082
2083
2084 raw_spin_lock(&new_base->lock);
2085 raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
2086
2087 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
2088 migrate_hrtimer_list(&old_base->clock_base[i],
2089 &new_base->clock_base[i]);
2090 }
2091
2092
2093
2094
2095
2096 hrtimer_update_softirq_timer(new_base, false);
2097
2098 raw_spin_unlock(&old_base->lock);
2099 raw_spin_unlock(&new_base->lock);
2100
2101
2102 __hrtimer_peek_ahead_timers();
2103 local_irq_enable();
2104 local_bh_enable();
2105 return 0;
2106}
2107
2108#endif
2109
2110void __init hrtimers_init(void)
2111{
2112 hrtimers_prepare_cpu(smp_processor_id());
2113 open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
2114}
2115
2116
2117
2118
2119
2120
2121
2122
2123int __sched
2124schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
2125 const enum hrtimer_mode mode, clockid_t clock_id)
2126{
2127 struct hrtimer_sleeper t;
2128
2129
2130
2131
2132
2133 if (expires && *expires == 0) {
2134 __set_current_state(TASK_RUNNING);
2135 return 0;
2136 }
2137
2138
2139
2140
2141 if (!expires) {
2142 schedule();
2143 return -EINTR;
2144 }
2145
2146 hrtimer_init_sleeper_on_stack(&t, clock_id, mode);
2147 hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
2148 hrtimer_sleeper_start_expires(&t, mode);
2149
2150 if (likely(t.task))
2151 schedule();
2152
2153 hrtimer_cancel(&t.timer);
2154 destroy_hrtimer_on_stack(&t.timer);
2155
2156 __set_current_state(TASK_RUNNING);
2157
2158 return !t.task ? 0 : -EINTR;
2159}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
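/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t), NULL means sleep forever
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode
 *
 * The current task state must be set before calling this function.
 * Returns 0 when the timeout expired (or *@expires was zero) and -EINTR
 * when the sleep was interrupted, i.e. the task was woken before the timer
 * fired or @expires was NULL.
 */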
2193int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
2194 const enum hrtimer_mode mode)
2195{
2196 return schedule_hrtimeout_range_clock(expires, delta, mode,
2197 CLOCK_MONOTONIC);
2198}
2199EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226
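/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t), NULL means sleep forever
 * @mode:	timer mode
 *
 * Same as schedule_hrtimeout_range() with a slack of 0.
 */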
2227int __sched schedule_hrtimeout(ktime_t *expires,
2228 const enum hrtimer_mode mode)
2229{
2230 return schedule_hrtimeout_range(expires, 0, mode);
2231}
2232EXPORT_SYMBOL_GPL(schedule_hrtimeout);
2233