/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{

	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
					  cycle_t delta)
{
	s64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
					   cycle_t cycles)
{
	cycle_t delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code in the timer interrupt.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tkr->read(tkr->clock),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
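
/*
 * Usage sketch (illustrative, not part of the original file): the fast
 * accessors are meant for contexts where taking tk_core.seq would be
 * unsafe, e.g. NMI handlers or tracing hooks. A hypothetical probe:
 *
 *	static void my_nmi_probe(void)		// hypothetical helper
 *	{
 *		u64 t0 = ktime_get_mono_fast_ns();
 *
 *		do_nmi_work();			// hypothetical work
 *		trace_printk("nmi took %llu ns\n",
 *			     ktime_get_mono_fast_ns() - t0);
 *	}
 *
 * Callers must tolerate the small non-monotonicity across updates
 * described above.
 */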

/* Suspend-time cycles value for halted fast timekeeper */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	if (remainder != 0) {
		tk->tkr_mono.xtime_nsec -= remainder;
		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
		tk->ntp_error += remainder << tk->ntp_error_shift;
		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
	}
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime value
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/* Update the monotonic raw base */
	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);
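
/*
 * Usage sketch (illustrative): getnstimeofday64() fills a timespec64 with
 * CLOCK_REALTIME, e.g. for stamping an event from process context:
 *
 *	struct timespec64 now;
 *
 *	getnstimeofday64(&now);
 *	pr_info("event at %lld.%09ld\n",
 *		(long long)now.tv_sec, now.tv_nsec);
 */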

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
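
/*
 * Usage sketch (illustrative): ktime_get() returns CLOCK_MONOTONIC, which
 * is the right clock for measuring durations since it never jumps when
 * the wall clock is set:
 *
 *	ktime_t start = ktime_get();
 *
 *	do_something();				// hypothetical work
 *	pr_debug("took %lld ns\n",
 *		 ktime_to_ns(ktime_sub(ktime_get(), start)));
 */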

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is called just when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	s64 nsec_raw;
	s64 nsec_real;
	cycle_t now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
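
/*
 * Usage sketch (illustrative): a driver can capture correlated clock
 * readings, e.g. as a history reference for a later call to
 * get_device_system_crosststamp():
 *
 *	struct system_time_snapshot snap;
 *
 *	ktime_get_snapshot(&snap);
 *	// snap.real, snap.raw and snap.cycles now describe one instant
 */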

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}
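
/*
 * Worked example (illustrative): scaling base = 10^12 by mult/div = 3/7
 * first splits base into tmp = 142857142857 and rem = 1, then computes
 * 142857142857 * 3 + (1 * 3) / 7 = 428571428571. Doing the division
 * first keeps tmp * mult representable in 64 bits where base * mult
 * could overflow; the fls64() checks reject inputs where even that is
 * not enough.
 */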

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set on history period
 * @ts:				Cross timestamp that should be adjusted using
 *				partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 cycle_t partial_history_cycles,
					 cycle_t total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles/2 ?
		true : false;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale the monotonic
	 * raw correction by mult(real)/mult(raw), yielding the realtime
	 * correction. Otherwise, compute the realtime correction the same
	 * way as the monotonic raw correction.
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *			system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *			time when counter provided by the driver is before the
 *			current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	s64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		cycle_t partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
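
/*
 * Usage sketch (illustrative): a PTP-capable driver supplies a callback
 * that latches its device clock together with the system counter. All
 * names below are hypothetical:
 *
 *	static int my_get_time(ktime_t *dev, struct system_counterval_t *sys,
 *			       void *ctx)
 *	{
 *		struct my_nic *nic = ctx;		// hypothetical device
 *
 *		return my_nic_latch_clocks(nic, dev, sys);
 *	}
 *
 *	...
 *	struct system_device_crosststamp xt;
 *	err = get_device_system_crosststamp(my_get_time, nic, NULL, &xt);
 *
 * On success, xt.device, xt.sys_realtime and xt.sys_monoraw all refer to
 * the same instant.
 */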

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:		pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);
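
/*
 * Usage sketch (illustrative): setting the wall clock to a known value,
 * e.g. after reading a hardware clock (rtc_secs is hypothetical):
 *
 *	struct timespec64 ts = { .tv_sec = rtc_secs, .tv_nsec = 0 };
 *
 *	if (do_settimeofday64(&ts))
 *		pr_warn("rejected invalid time\n");
 *
 * The call fails with -EINVAL if the value is out of range or would make
 * the monotonic clock appear to start in the future.
 */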

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if (!timespec_inject_offset_valid(ts))
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns(&tk->tkr_raw);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exists = true;

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume() means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * 1) can be determined whether to use or not only when doing
 * timekeeping_resume() which is invoked after rtc_suspend(),
 * so we can't skip rtc_suspend() surely if system has 1).
 *
 * But if system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures which need suspend time injected
 * outside of timekeeping_resume(), e.g. RTC based sleeptime injection.
 *
 * It should only be called by rtc_resume(), and allows a suspend offset
 * to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
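
/*
 * Usage sketch (illustrative): the RTC resume path computes how long the
 * system slept from RTC readings taken at suspend and resume, and feeds
 * the difference in here (the local names are hypothetical):
 *
 *	struct timespec64 sleep = timespec64_sub(rtc_now, rtc_at_suspend);
 *
 *	timekeeping_inject_sleeptime64(&sleep);
 */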
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk->tkr_mono.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
						tk->tkr_mono.mask);

		/*
		 * "cycle_delta * mult" may cause 64 bits overflow, if the
		 * suspended time is too long. In that case we need do the
		 * 64 bits math carefully
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume mult_adj == 1 for now.
	 *
	 * When mult_adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj_mult) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
						   s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	u32 base = tk->tkr_mono.clock->mult;
	u32 max = tk->tkr_mono.clock->maxadj;
	u32 cur_adj = tk->tkr_mono.mult;
	s64 tick_error;
	bool negative;
	u32 adj_scale;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if it's small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* If any adjustment would pass the max, just return */
	if (negative && (cur_adj - 1) <= (base - max))
		return;
	if (!negative && (cur_adj + 1) >= (base + max))
		return;
	/*
	 * Sort out the magnitude of the correction, but
	 * avoid making so large a correction that we go
	 * over the max adjustment.
	 */
	adj_scale = 0;
	tick_error = abs(tick_error);
	while (tick_error > interval) {
		u32 adj = 1 << (adj_scale + 1);

		/* Check if adjustment gets us within 1 unit from the max */
		if (negative && (cur_adj - adj) <= (base - max))
			break;
		if (!negative && (cur_adj + adj) >= (base + max))
			break;

		adj_scale++;
		tick_error >>= 1;
	}

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
		tk->tkr_mono.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unaccumulated cycles offset
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
					u32 shift,
					unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
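
/*
 * Worked example (illustrative): assume cycle_interval = 1000 cycles and
 * offset = 7500 cycles pending after an idle period. update_wall_time()
 * below starts with shift = ilog2(7500) - ilog2(1000) = 12 - 9 = 3; the
 * 8000-cycle chunk does not fit, so the first call returns the offset
 * unchanged and shift drops to 2. Then 4000, 2000 and 1000 cycles are
 * accumulated in successive calls, leaving offset = 500 pending for the
 * next tick. That is O(log(offset/cycle_interval)) iterations instead of
 * seven fixed-size ones.
 */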

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call this outside of the timekeeper_lock */
		clock_was_set_delayed();
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec64 current_kernel_time64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time64);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);

	return now;
}
EXPORT_SYMBOL(get_monotonic_coarse64);

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
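
/*
 * Usage sketch (illustrative): the ADJ_SETOFFSET path above backs the
 * adjtimex(2)/clock_adjtime(2) interface, e.g. stepping the clock forward
 * by 500 ms:
 *
 *	struct timex txc = {
 *		.modes = ADJ_SETOFFSET | ADJ_NANO,
 *		.time  = { .tv_sec = 0, .tv_usec = 500000000 },
 *	};
 *
 *	err = do_adjtimex(&txc);
 *
 * With ADJ_NANO set, time.tv_usec carries nanoseconds, as the conversion
 * above shows.
 */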

#ifdef CONFIG_NTP_PPS

/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled !
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}