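/*
 * hrtimer.c - high-resolution kernel timers
 *
 * In contrast to the low-resolution timeout API implemented in
 * kernel/timer.c, hrtimers provide finer resolution and accuracy
 * depending on system configuration and capabilities.
 */
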
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>

#include "time/timekeeping.h"
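
/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */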
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
			.resolution = KTIME_LOW_RES,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}
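
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */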
#ifdef CONFIG_SMP
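
/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So the timer expiry and migration code can safely modify all timers
 * which could be found on the queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */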
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}
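
/*
 * Get the preferred target CPU for NOHZ
 */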
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ_COMMON
	if (!pinned && get_sysctl_timer_migration())
		return get_nohz_timer_target();
#endif
	return this_cpu;
}
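
/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */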
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}
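
/*
 * Switch the timer base to the current CPU when possible.
 */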
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_hrtimer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	} else {
		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			goto again;
		}
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif /* !CONFIG_SMP */

#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
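/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of @kt and @nsec in ktime_t format
 */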
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		/* Make sure nsec fits into long */
		if (unlikely(nsec > KTIME_SEC_MAX))
			return (ktime_t){ .tv64 = KTIME_MAX };

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
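
/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */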
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */
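
/*
 * Divide a ktime value by a nanosecond value
 */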
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
#endif /* BITS_PER_LONG < 64 */
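
/*
 * Add two ktime values and do a safety check for overflow:
 */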
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}
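
/*
 * fixup_init is called when:
 * - an active object is initialized
 */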
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}
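
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */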
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}
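
/*
 * fixup_free is called when:
 * - an active object is freed
 */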
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else /* CONFIG_DEBUG_OBJECTS_TIMERS */

static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }

#endif /* CONFIG_DEBUG_OBJECTS_TIMERS */

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct timerqueue_node *next;
		struct hrtimer *timer;

		next = timerqueue_getnext(&base->active);
		if (!next)
			continue;

		timer = container_of(next, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}
	/*
	 * clock_was_set() might have changed base->offset of any of
	 * the clock bases so the next expiry will occur in advance
	 * to avoid the hang which used to trigger for this case.
	 */
	if (expires_next.tv64 < 0)
		expires_next.tv64 = 0;
	return expires_next;
}
#endif

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets_now(&base->clock_was_set,
					    offs_real, offs_boot, offs_tai);
}

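/* High resolution timer related functions */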
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __this_cpu_read(hrtimer_bases.hres_active);
}

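/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */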
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	ktime_t expires_next;

	if (!cpu_base->hres_active)
		return;

	expires_next = __hrtimer_get_next_event(cpu_base);

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * leave the hang delay active in the hardware. We want the
	 * system to make progress. That also prevents the following
	 * scenario:
	 * T1 expires 50ms from now
	 * T2 expires 5s from now
	 *
	 * T1 is removed, so this code is called and would reprogram
	 * the hardware to 5s from now. Any hrtimer_start after that
	 * will not reprogram the hardware due to hang_detected being
	 * set. So we'd effectively block all timers until the T2 event
	 * fires.
	 */
	if (cpu_base->hang_detected)
		return;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

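/*
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */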
static void hrtimer_reprogram(struct hrtimer *timer,
			      struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * If the timer is not on the current cpu, we cannot reprogram
	 * the other cpus clock event device.
	 */
	if (base->cpu_base != cpu_base)
		return;

	/*
	 * If the hrtimer interrupt is running, then it will
	 * reevaluate the clock bases and reprogram the clock event
	 * device. The callbacks are always executed in hard interrupt
	 * context so we don't need to reprogram anything in the event
	 * handler.
	 */
	if (cpu_base->in_hrtirq)
		return;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Set it to 0.
	 */
	if (expires.tv64 < 0)
		expires.tv64 = 0;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return;

	/*
	 * Program the timer hardware. We enforce the expiry for
	 * events which are already in the past.
	 */
	cpu_base->expires_next = expires;
	tick_program_event(expires, 1);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

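/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */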
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);

	if (!hrtimer_hres_active())
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
	int i;

	if (tick_init_highres()) {
		printk(KERN_WARNING "Could not switch to high resolution "
		       "mode on CPU %d\n", base->cpu);
		return 0;
	}
	base->hres_active = 1;
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		base->clock_base[i].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	return 1;
}

static void clock_was_set_work(struct work_struct *work)
{
	clock_was_set();
}

static DECLARE_WORK(hrtimer_work, clock_was_set_work);

/*
 * Called from timekeeping and resume code to reprogram the hrtimer
 * interrupt device on all cpus.
 */
void clock_was_set_delayed(void)
{
	schedule_work(&hrtimer_work);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

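/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */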
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt on all online CPUs.  However, all other CPUs will be
 * stopped with IRQs interrupts disabled so the clock_was_set() call
 * is safe.
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	/* Retrigger on the local CPU */
	retrigger_next_event(NULL);
	/* And schedule a retrigger for all others */
	clock_was_set_delayed();
}

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

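/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */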
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is done via the rbtree insertion functions.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	timerqueue_add(&base->active, &timer->node);
	base->cpu_base->active_bases |= 1 << base->index;

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	/* Tell the caller whether the new timer is the leftmost (earliest): */
	return (&timer->node == base->active.next);
}

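/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */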
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	struct timerqueue_node *next_timer;

	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;

	next_timer = timerqueue_getnext(&base->active);
	timerqueue_del(&base->active, &timer->node);
	if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
		/* Reprogram the clock event device. if enabled */
		if (reprogram && hrtimer_hres_active()) {
			ktime_t expires;

			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
			if (base->cpu_base->expires_next.tv64 == expires.tv64)
				hrtimer_force_reprogram(base->cpu_base, 1);
		}
#endif
	}
	if (!timerqueue_getnext(&base->active))
		base->cpu_base->active_bases &= ~(1 << base->index);
out:
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}

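/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */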
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
			   unsigned long delta_ns, const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	remove_hrtimer(timer, base);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);
	if (!leftmost)
		goto unlock;

	if (!hrtimer_is_hres_active(timer)) {
		/*
		 * Kick to reschedule the next tick to handle the new timer
		 * on dynticks target.
		 */
		wake_up_nohz_cpu(new_base->cpu_base->cpu);
	} else {
		hrtimer_reprogram(timer, new_base);
	}
unlock:
	unlock_hrtimer_base(timer, &flags);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return hrtimer_start_range_ns(timer, tim, 0, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_start);

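/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */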
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	/*
	 * Check lockless first. If the timer is not active (neither
	 * enqueued nor running the callback), nothing to do here. The
	 * base lock does not serialize against a concurrent enqueue,
	 * so we can avoid taking it.
	 */
	if (!hrtimer_active(timer))
		return 0;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ_COMMON
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the next expiry time or KTIME_MAX if no timer is pending.
 */
u64 hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	u64 expires = KTIME_MAX;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active())
		expires = __hrtimer_get_next_event(cpu_base).tv64;

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	return expires;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	/*
	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they need to become CLOCK_MONOTONIC to
	 * ensure POSIX compliance.
	 */
	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;
	int base = hrtimer_clockid_to_base(which_clock);

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
			  struct hrtimer_clock_base *base,
			  struct hrtimer *timer, ktime_t *now)
{
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore its safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
				break;

			__run_hrtimer(cpu_base, base, timer, &basenow);
		}
	}
}

#ifdef CONFIG_HIGH_RES_TIMERS

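/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */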
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	cpu_base->in_hrtirq = 1;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	__hrtimer_run_queues(cpu_base, now);

	/* Reevaluate the clock bases for the next expiry */
	expires_next = __hrtimer_get_next_event(cpu_base);
	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	cpu_base->in_hrtirq = 0;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid or
	 * minimize the hang.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static inline void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from run_local_timers in hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t now;

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check periodically, whether we
	 * can switch to highres and / or nohz mode. The clocksource
	 * switch happens with xtime_lock held. Notification from
	 * there only sets the check bit in the tick_oneshot code,
	 * otherwise we might deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
		hrtimer_switch_to_hres();
		return;
	}

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	__hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
			      HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (dl_task(current) || rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	cpu_base->cpu = cpu;
	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				 struct hrtimer_clock_base *new_base)
{
	struct hrtimer *timer;
	struct timerqueue_node *node;

	while ((node = timerqueue_getnext(&old_base->active))) {
		timer = container_of(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_deactivate(timer);

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timers on the new cpu. This does not
		 * reprogram the event device in case the timer
		 * expires before the earliest on this CPU, but we run
		 * hrtimer_interrupt after we migrated everything to
		 * sort out already expired timers and reprogram the
		 * event device.
		 */
		enqueue_hrtimer(timer, new_base);

		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
}

static void migrate_hrtimers(int scpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i;

	BUG_ON(cpu_online(scpu));
	tick_cancel_sched_timer(scpu);

	local_irq_disable();
	old_base = &per_cpu(hrtimer_bases, scpu);
	new_base = &__get_cpu_var(hrtimer_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	raw_spin_lock(&new_base->lock);
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		migrate_hrtimer_list(&old_base->clock_base[i],
				     &new_base->clock_base[i]);
	}

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);

	/* Check, if we got expired work to do */
	__hrtimer_peek_ahead_timers();
	local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int hrtimer_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
}

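/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */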
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}

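/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @delta:	slack in expires timeout (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel give the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, but no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */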
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);

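/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:	timeout value (ktime_t)
 * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */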
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);