/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions.
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}
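
/*
 * Illustrative note (not in the original source): xtime_nsec stores
 * nanoseconds left-shifted by the clocksource shift, so sub-nanosecond
 * precision from NTP adjustments is not lost between updates. A sketch
 * of the round trip, assuming a shift of 8:
 *
 *	ts->tv_nsec = 500;			// plain nanoseconds
 *	xtime_nsec  = 500ULL << 8;		// stored as 128000 "shifted ns"
 *	tv_nsec     = xtime_nsec >> 8;		// reads back as 500
 *
 * One second in this representation is NSEC_PER_SEC << shift, which is
 * exactly the bound tk_normalize_xtime() checks against above.
 */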

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offs_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{

	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
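
/*
 * Illustrative note (not in the original source): the interval math in
 * tk_setup_internals() converts the fixed NTP interval length into
 * clocksource cycles by rounding (ns << shift) / mult to the nearest
 * cycle. A sketch with assumed sample values, mult = 4194304 (1 << 22)
 * and shift = 22 (i.e. a 1 GHz clock), and HZ = 1000 so that
 * NTP_INTERVAL_LENGTH is 1000000 ns:
 *
 *	tmp  = 1000000ULL << 22;	// interval in shifted ns
 *	tmp += 4194304 / 2;		// round to nearest cycle
 *	tmp /= 4194304;			// -> 1000000 cycles per interval
 *
 * xtime_interval = interval * mult then recovers the shifted-ns length
 * of that many whole cycles, and xtime_remainder keeps whatever part
 * of the NTP interval the whole cycles cannot represent (0 here).
 */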

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
					  cycle_t delta)
{
	s64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
					   cycle_t cycles)
{
	cycle_t delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to tk_fast timekeeper
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique: flip the sequence count before and after
 * each copy so that readers which race with the update are directed to
 * the array entry which is not currently being written.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		/*
		 * The raw cycle delta must be scaled by mult/shift;
		 * adding unconverted cycles to a nanosecond base would
		 * be wrong for any clock not running at exactly 1 GHz.
		 */
		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(tkr->read(tkr->clock),
						  tkr->cycle_last, tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
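
/*
 * Illustrative usage sketch (not part of the original file): the fast
 * accessors above exist for contexts where taking tk_core.seq is not
 * safe, e.g. NMI handlers or code tracing the timekeeping core itself.
 * A hypothetical tracer could stamp its records like this (all names
 * below are made up):
 *
 *	static void my_tracer_stamp(struct my_trace_record *rec)
 *	{
 *		// Safe even if this interrupts a timekeeping update:
 *		// the latch directs us to the consistent copy.
 *		rec->ts_ns = ktime_get_mono_fast_ns();
 *	}
 *
 * Callers must tolerate the caveat documented above: timestamps taken
 * across an update are not guaranteed to be monotonic.
 */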

/* Suspend-time cycles value for halted fast timekeeper */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * its value up to the next nanosecond (not a second boundary),
	 * and feed the rounding difference back into ntp_error so the
	 * error accounting stays consistent. The old vsyscall
	 * implementations cannot handle the sub-nanosecond remainder
	 * that the shifted-nanosecond representation keeps in
	 * xtime_nsec.
	 */
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	if (remainder != 0) {
		tk->tkr_mono.xtime_nsec -= remainder;
		tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
		tk->ntp_error += remainder << tk->ntp_error_shift;
		tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
	}
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/* Update the monotonic raw base */
	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARNs if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
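
/*
 * Illustrative usage sketch (not in the original source): measuring an
 * elapsed interval with the monotonic clock, as a driver might:
 *
 *	ktime_t start = ktime_get();
 *	do_work();	// hypothetical workload
 *	s64 elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 * CLOCK_MONOTONIC is the right clock for durations because, unlike the
 * realtime clock, it is never stepped by settimeofday() or leap second
 * insertion.
 */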

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]  = &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono: time to convert.
 * @offs: which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds(),
 * but without the sequence counter protection. This internal function
 * is called just when timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	s64 nsec_raw;
	s64 nsec_real;
	cycle_t now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}
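
/*
 * Illustrative note (not in the original source): scale64_check_overflow()
 * computes *base = (*base * mult) / div without losing precision by
 * splitting *base into quotient and remainder with respect to div.
 * A sketch with assumed numbers, *base = 10^12, mult = 3, div = 7:
 *
 *	tmp = 10^12 / 7 = 142857142857, rem = 1
 *	tmp *= 3	-> 428571428571
 *	rem *= 3	-> 3;  rem /= 7 -> 0
 *	*base = 428571428571	(the exact truncated value of 3*10^12/7)
 *
 * The fls64() checks reject inputs where either partial product would
 * overflow 64 bits, returning -EOVERFLOW instead of a wrong answer.
 */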

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set on history period
 * @ts:				Cross timestamp that should be adjusted using
 *				partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 cycle_t partial_history_cycles,
					 cycle_t total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles/2 ?
		true : false;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale monotonic raw
	 *	correction by:
	 *	mult(real)/mult(raw) yielding the realtime correction
	 * Otherwise, calculate the realtime correction similar to monotonic
	 *	raw calculation
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(cycle_t before, cycle_t test, cycle_t after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}
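
/*
 * Illustrative note (not in the original source), tracing cycle_between():
 *
 *	cycle_between(100, 150, 200) -> true   (strictly inside)
 *	cycle_between(100,  50, 200) -> false
 *	cycle_between(250,   5,  10) -> true
 *
 * The last case is the wrapped interval: when before > after, the
 * counter is assumed to have wrapped between them, so any test value
 * below 'before' is treated as falling inside the interval.
 */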

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *	system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *	time when counter provided by the driver is before the current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	s64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk->tkr_mono.read(tk->tkr_mono.clock);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		cycle_t partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
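
/*
 * Illustrative usage sketch (not in the original source): a PTP-capable
 * driver supplies a callback that atomically latches its device clock
 * together with the system counter. All names below are hypothetical:
 *
 *	static int foo_get_time(ktime_t *device_time,
 *				struct system_counterval_t *system_counter,
 *				void *ctx)
 *	{
 *		struct foo_dev *foo = ctx;
 *
 *		// Hardware latches both clocks in a single operation.
 *		*device_time = foo_read_latched_device_clock(foo);
 *		*system_counter = foo_read_latched_sys_counter(foo);
 *		return 0;
 *	}
 *
 *	struct system_device_crosststamp xt;
 *	int err = get_device_system_crosststamp(foo_get_time, foo, NULL, &xt);
 *
 * Passing a previously captured ktime_get_snapshot() as the third
 * argument enables the historical interpolation path above.
 */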

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:		pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if (!timespec_inject_offset_valid(ts))
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Sets tai offset. Caller must hold write on timekeeper
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns(&tk->tkr_raw);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exists = true;

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/*
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume() means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/*
 * 1) can be determined whether to use or not only when doing
 * timekeeping_resume() which is invoked after rtc_suspend(),
 * so we can't skip rtc_suspend() surely if system has 1).
 *
 * But if system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that could be
	 * used: Nonstop clocksource during suspend, persistent clock and rtc
	 * device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk->tkr_mono.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
						tk->tkr_mono.mask);

		/*
		 * "cycle_delta * mult" may cause 64 bits overflow, if the
		 * suspended time is too long. In that case we need do the
		 * 64 bits math carefully
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 delta, delta_delta;
	static struct timespec64 old_delta;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume mult_adj == 1 for now.
	 *
	 * When mult_adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj_mult) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
						   s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	u32 base = tk->tkr_mono.clock->mult;
	u32 max = tk->tkr_mono.clock->maxadj;
	u32 cur_adj = tk->tkr_mono.mult;
	s64 tick_error;
	bool negative;
	u32 adj_scale;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if its small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* If any adjustment would pass the max, just return */
	if (negative && (cur_adj - 1) <= (base - max))
		return;
	if (!negative && (cur_adj + 1) >= (base + max))
		return;
	/*
	 * Sort out the magnitude of the correction, but
	 * avoid making so large a correction that we go
	 * over the max adjustment.
	 */
	adj_scale = 0;
	tick_error = abs(tick_error);
	while (tick_error > interval) {
		u32 adj = 1 << (adj_scale + 1);

		/* Check if adjustment gets us within 1 unit from the max */
		if (negative && (cur_adj - adj) <= (base - max))
			break;
		if (!negative && (cur_adj + adj) >= (base + max))
			break;

		adj_scale++;
		tick_error >>= 1;
	}

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj_scale);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
		tk->tkr_mono.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
					u32 shift,
					unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
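
/*
 * Illustrative note (not in the original source): update_wall_time()
 * below pairs logarithmic_accumulation() with a decreasing shift, so the
 * work is logarithmic in the number of pending intervals. Assuming
 * cycle_interval = 1000 cycles and offset = 9500 cycles, the loop
 * starts at shift = ilog2(9500) - ilog2(1000) = 4 and proceeds:
 *
 *	shift 4: chunk 16000 > 9500		-> nothing, shift drops
 *	shift 3: chunk 8000 accumulated		-> offset 9500 -> 1500
 *	shift 2, 1: chunks 4000, 2000 too big	-> shift drops
 *	shift 0: chunk 1000 accumulated		-> offset 1500 -> 500
 *
 * Two accumulating passes replace the nine a per-interval loop would
 * need; the remaining 500 cycles stay in offset for the next update.
 */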

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everybody converted to the
	 * shifted-nanosecond timekeeping; see old_vsyscall_fixup().
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context*/
		clock_was_set_delayed();
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec64 current_kernel_time64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time64);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);

	return now;
}
EXPORT_SYMBOL(get_monotonic_coarse64);

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}