1
2
3
4
5
6
7
8
9
10
11#include <linux/module.h>
12#include <linux/interrupt.h>
13#include <linux/percpu.h>
14#include <linux/init.h>
15#include <linux/mm.h>
16#include <linux/sched.h>
17#include <linux/sysdev.h>
18#include <linux/clocksource.h>
19#include <linux/jiffies.h>
20#include <linux/time.h>
21#include <linux/tick.h>
22#include <linux/stop_machine.h>
23
24
/* Structure holding internal timekeeping values. */
struct timekeeper {
	/* Current clocksource used for timekeeping. */
	struct clocksource *clock;
	/* The shift value of the current clocksource. */
	int shift;

	/* Number of clock cycles in one NTP interval. */
	cycle_t cycle_interval;
	/* Number of clock shifted nano seconds in one NTP interval. */
	u64 xtime_interval;
	/* shifted nano seconds left over when rounding cycle_interval */
	s64 xtime_remainder;
	/* Raw nano seconds accumulated per NTP interval. */
	u32 raw_interval;

	/* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
	u64 xtime_nsec;
	/*
	 * Difference between accumulated time and NTP time in ntp
	 * shifted nano seconds.
	 */
	s64 ntp_error;
	/*
	 * Shift conversion between clock shifted nano seconds and
	 * ntp shifted nano seconds.
	 */
	int ntp_error_shift;
	/* NTP adjusted clock multiplier */
	u32 mult;
};

/* The single timekeeping state instance, protected by xtime_lock. */
static struct timekeeper timekeeper;
53
54
55
56
57
58
59
60
61
62
63
/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;

	timekeeper.clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	timekeeper.cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	timekeeper.xtime_interval = (u64) interval * clock->mult;
	timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
	timekeeper.raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	timekeeper.xtime_nsec = 0;
	timekeeper.shift = clock->shift;

	timekeeper.ntp_error = 0;
	timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult value for the currently
	 * active clocksource. This value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	timekeeper.mult = clock->mult;
}
103
104
105static inline s64 timekeeping_get_ns(void)
106{
107 cycle_t cycle_now, cycle_delta;
108 struct clocksource *clock;
109
110
111 clock = timekeeper.clock;
112 cycle_now = clock->read(clock);
113
114
115 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
116
117
118 return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
119 timekeeper.shift);
120}
121
122static inline s64 timekeeping_get_ns_raw(void)
123{
124 cycle_t cycle_now, cycle_delta;
125 struct clocksource *clock;
126
127
128 clock = timekeeper.clock;
129 cycle_now = clock->read(clock);
130
131
132 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
133
134
135 return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
136}
137
138
139
140
141
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/*
 * The current time.
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 * used instead.
 */
static struct timespec xtime __attribute__ ((aligned (16)));
static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
static struct timespec raw_time;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
171
172
173void timekeeping_leap_insert(int leapsecond)
174{
175 xtime.tv_sec += leapsecond;
176 wall_to_monotonic.tv_sec -= leapsecond;
177 update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
178 timekeeper.mult);
179}
180
181
182
183
184
185
186
187
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = timekeeper.clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	/* consume the delta so it is not accumulated a second time */
	clock->cycle_last = cycle_now;

	nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
				  timekeeper.shift);

	/* If arch requires, add in gettimeoffset() */
	nsec += arch_gettimeoffset();

	timespec_add_ns(&xtime, nsec);

	/* Also advance the raw monotonic clock, using the raw mult/shift. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&raw_time, nsec);
}
210
211
212
213
214
215
216
217void getnstimeofday(struct timespec *ts)
218{
219 unsigned long seq;
220 s64 nsecs;
221
222 WARN_ON(timekeeping_suspended);
223
224 do {
225 seq = read_seqbegin(&xtime_lock);
226
227 *ts = xtime;
228 nsecs = timekeeping_get_ns();
229
230
231 nsecs += arch_gettimeoffset();
232
233 } while (read_seqretry(&xtime_lock, seq));
234
235 timespec_add_ns(ts, nsecs);
236}
237
238EXPORT_SYMBOL(getnstimeofday);
239
240ktime_t ktime_get(void)
241{
242 unsigned int seq;
243 s64 secs, nsecs;
244
245 WARN_ON(timekeeping_suspended);
246
247 do {
248 seq = read_seqbegin(&xtime_lock);
249 secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
250 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
251 nsecs += timekeeping_get_ns();
252
253 } while (read_seqretry(&xtime_lock, seq));
254
255
256
257
258 return ktime_add_ns(ktime_set(secs, 0), nsecs);
259}
260EXPORT_SYMBOL_GPL(ktime_get);
261
262
263
264
265
266
267
268
269
270void ktime_get_ts(struct timespec *ts)
271{
272 struct timespec tomono;
273 unsigned int seq;
274 s64 nsecs;
275
276 WARN_ON(timekeeping_suspended);
277
278 do {
279 seq = read_seqbegin(&xtime_lock);
280 *ts = xtime;
281 tomono = wall_to_monotonic;
282 nsecs = timekeeping_get_ns();
283
284 } while (read_seqretry(&xtime_lock, seq));
285
286 set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
287 ts->tv_nsec + tomono.tv_nsec + nsecs);
288}
289EXPORT_SYMBOL_GPL(ktime_get_ts);
290
291#ifdef CONFIG_NTP_PPS
292
293
294
295
296
297
298
299
300
301
302void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
303{
304 unsigned long seq;
305 s64 nsecs_raw, nsecs_real;
306
307 WARN_ON_ONCE(timekeeping_suspended);
308
309 do {
310 u32 arch_offset;
311
312 seq = read_seqbegin(&xtime_lock);
313
314 *ts_raw = raw_time;
315 *ts_real = xtime;
316
317 nsecs_raw = timekeeping_get_ns_raw();
318 nsecs_real = timekeeping_get_ns();
319
320
321 arch_offset = arch_gettimeoffset();
322 nsecs_raw += arch_offset;
323 nsecs_real += arch_offset;
324
325 } while (read_seqretry(&xtime_lock, seq));
326
327 timespec_add_ns(ts_raw, nsecs_raw);
328 timespec_add_ns(ts_real, nsecs_real);
329}
330EXPORT_SYMBOL(getnstime_raw_and_real);
331
332#endif
333
334
335
336
337
338
339
340void do_gettimeofday(struct timeval *tv)
341{
342 struct timespec now;
343
344 getnstimeofday(&now);
345 tv->tv_sec = now.tv_sec;
346 tv->tv_usec = now.tv_nsec/1000;
347}
348
349EXPORT_SYMBOL(do_gettimeofday);
350
351
352
353
354
355
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/* Accumulate time up to "now" under the old settings first. */
	timekeeping_forward_now();

	/* Shift the monotonic offset so CLOCK_MONOTONIC does not jump. */
	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;

	/* The clock was stepped, so discard accumulated NTP state. */
	timekeeper.ntp_error = 0;
	ntp_clear();

	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
389
390
391
392
393
394
395static int change_clocksource(void *data)
396{
397 struct clocksource *new, *old;
398
399 new = (struct clocksource *) data;
400
401 timekeeping_forward_now();
402 if (!new->enable || new->enable(new) == 0) {
403 old = timekeeper.clock;
404 timekeeper_setup_internals(new);
405 if (old->disable)
406 old->disable(old);
407 }
408 return 0;
409}
410
411
412
413
414
415
416
417
418void timekeeping_notify(struct clocksource *clock)
419{
420 if (timekeeper.clock == clock)
421 return;
422 stop_machine(change_clocksource, clock, NULL);
423 tick_clock_notify();
424}
425
426
427
428
429
430
431ktime_t ktime_get_real(void)
432{
433 struct timespec now;
434
435 getnstimeofday(&now);
436
437 return timespec_to_ktime(now);
438}
439EXPORT_SYMBOL_GPL(ktime_get_real);
440
441
442
443
444
445
446
447void getrawmonotonic(struct timespec *ts)
448{
449 unsigned long seq;
450 s64 nsecs;
451
452 do {
453 seq = read_seqbegin(&xtime_lock);
454 nsecs = timekeeping_get_ns_raw();
455 *ts = raw_time;
456
457 } while (read_seqretry(&xtime_lock, seq));
458
459 timespec_add_ns(ts, nsecs);
460}
461EXPORT_SYMBOL(getrawmonotonic);
462
463
464
465
466
467int timekeeping_valid_for_hres(void)
468{
469 unsigned long seq;
470 int ret;
471
472 do {
473 seq = read_seqbegin(&xtime_lock);
474
475 ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
476
477 } while (read_seqretry(&xtime_lock, seq));
478
479 return ret;
480}
481
482
483
484
485
486
487
488u64 timekeeping_max_deferment(void)
489{
490 return timekeeper.clock->max_idle_ns;
491}
492
493
494
495
496
497
498
499
500
501
502void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
503{
504 ts->tv_sec = 0;
505 ts->tv_nsec = 0;
506}
507
508
509
510
511
512
513
514
515
516
517void __attribute__((weak)) read_boot_clock(struct timespec *ts)
518{
519 ts->tv_sec = 0;
520 ts->tv_nsec = 0;
521}
522
523
524
525
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot;

	read_persistent_clock(&now);
	read_boot_clock(&boot);

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	timekeeper_setup_internals(clock);

	xtime.tv_sec = now.tv_sec;
	xtime.tv_nsec = now.tv_nsec;
	raw_time.tv_sec = 0;
	raw_time.tv_nsec = 0;
	/* If no boot time is available, fall back to the persistent clock. */
	if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
		boot.tv_sec = xtime.tv_sec;
		boot.tv_nsec = xtime.tv_nsec;
	}
	/* monotonic time starts at zero: offset is the negated boot time */
	set_normalized_timespec(&wall_to_monotonic,
				-boot.tv_sec, -boot.tv_nsec);
	total_sleep_time.tv_sec = 0;
	total_sleep_time.tv_nsec = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
558
559
/* time in seconds when suspend began, read from the persistent clock */
static struct timespec timekeeping_suspend_time;
561
562
563
564
565
566
567
568
569
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* Credit time spent in suspend to the wall clock and sleep total. */
	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		xtime = timespec_add(xtime, ts);
		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
		total_sleep_time = timespec_add(total_sleep_time, ts);
	}
	/* re-base the last cycle value so no suspend time is accumulated */
	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
	timekeeper.ntp_error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
602
/**
 * timekeeping_suspend - Suspends the generic timekeeping subsystem.
 * @dev:	unused
 * @state:	unused
 */
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	/* Remember when we suspended so resume can credit sleep time. */
	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Accumulate time up to "now" before stopping. */
	timekeeping_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();

	return 0;
}
619
620
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};
631
632static int __init timekeeping_init_device(void)
633{
634 int error = sysdev_class_register(&timekeeping_sysclass);
635 if (!error)
636 error = sysdev_register(&device_timer);
637 return error;
638}
639
640device_initcall(timekeeping_init_device);
641
642
643
644
645
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.
	 */
	error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
	tick_error -= timekeeper.xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		/* fold the sign out so the shift loop works on magnitudes */
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
691
692
693
694
695
696
/*
 * timekeeping_adjust - Adjusts the multiplier to reduce the error
 *
 * This function adjusts the multiplier to reduce the error value in
 * timekeeper.ntp_error (the difference between what we have
 * accumulated and what NTP wants us to have accumulated).
 */
static void timekeeping_adjust(s64 offset)
{
	s64 error, interval = timekeeper.cycle_interval;
	int adj;

	/* drop one bit so the +-interval comparisons below have headroom */
	error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = timekeeping_bigadjust(error, &interval, &offset);
	} else
		return;	/* error within deadband: no adjustment */

	timekeeper.mult += adj;
	timekeeper.xtime_interval += interval;
	timekeeper.xtime_nsec -= offset;
	timekeeper.ntp_error -= (interval - offset) <<
				timekeeper.ntp_error_shift;
}
726
727
728
729
730
731
732
733
734
735
736
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
{
	/* one second, expressed in clock-shifted nanoseconds */
	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
	u64 raw_nsecs;

	/* If the offset is smaller then a shifted interval, do nothing */
	if (offset < timekeeper.cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= timekeeper.cycle_interval << shift;
	timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;

	timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
	while (timekeeper.xtime_nsec >= nsecps) {
		timekeeper.xtime_nsec -= nsecps;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Accumulate raw time */
	raw_nsecs = timekeeper.raw_interval << shift;
	raw_nsecs += raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		/* do_div leaves the quotient in raw_secs, returns remainder */
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		raw_time.tv_sec += raw_secs;
	}
	raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	timekeeper.ntp_error += tick_length << shift;
	timekeeper.ntp_error -=
		(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
				(timekeeper.ntp_error_shift + shift);

	return offset;
}
775
776
777
778
779
780
781
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	struct clocksource *clock;
	cycle_t offset;
	int shift = 0, maxshift;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

	clock = timekeeper.clock;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = timekeeper.cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= timekeeper.cycle_interval) {
		offset = logarithmic_accumulation(offset, shift);
		if(offset < timekeeper.cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in timekeeping_adjust(),
	 * its possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
		s64 neg = -(s64)timekeeper.xtime_nsec;
		timekeeper.xtime_nsec = 0;
		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
	}

	/*
	 * Store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
	timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
	timekeeper.ntp_error += timekeeper.xtime_nsec <<
				timekeeper.ntp_error_shift;

	/*
	 * Finally, make sure that after the rounding
	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
	 */
	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
		xtime.tv_nsec -= NSEC_PER_SEC;
		xtime.tv_sec++;
		second_overflow();
	}

	/* Republish the updated values via the vsyscall page. */
	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
			timekeeper.mult);
}
869
870
871
872
873
874
875
876
877
878
879
880
881void getboottime(struct timespec *ts)
882{
883 struct timespec boottime = {
884 .tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
885 .tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
886 };
887
888 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
889}
890EXPORT_SYMBOL_GPL(getboottime);
891
892
893
894
895
896void monotonic_to_bootbased(struct timespec *ts)
897{
898 *ts = timespec_add(*ts, total_sleep_time);
899}
900EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
901
902unsigned long get_seconds(void)
903{
904 return xtime.tv_sec;
905}
906EXPORT_SYMBOL(get_seconds);
907
908struct timespec __current_kernel_time(void)
909{
910 return xtime;
911}
912
913struct timespec __get_wall_to_monotonic(void)
914{
915 return wall_to_monotonic;
916}
917
918struct timespec current_kernel_time(void)
919{
920 struct timespec now;
921 unsigned long seq;
922
923 do {
924 seq = read_seqbegin(&xtime_lock);
925
926 now = xtime;
927 } while (read_seqretry(&xtime_lock, seq));
928
929 return now;
930}
931EXPORT_SYMBOL(current_kernel_time);
932
933struct timespec get_monotonic_coarse(void)
934{
935 struct timespec now, mono;
936 unsigned long seq;
937
938 do {
939 seq = read_seqbegin(&xtime_lock);
940
941 now = xtime;
942 mono = wall_to_monotonic;
943 } while (read_seqretry(&xtime_lock, seq));
944
945 set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
946 now.tv_nsec + mono.tv_nsec);
947 return now;
948}
949