1
2
3
4
5
6#include <linux/timekeeper_internal.h>
7#include <linux/module.h>
8#include <linux/interrupt.h>
9#include <linux/percpu.h>
10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/nmi.h>
13#include <linux/sched.h>
14#include <linux/sched/loadavg.h>
15#include <linux/sched/clock.h>
16#include <linux/syscore_ops.h>
17#include <linux/clocksource.h>
18#include <linux/jiffies.h>
19#include <linux/time.h>
20#include <linux/tick.h>
21#include <linux/stop_machine.h>
22#include <linux/pvclock_gtod.h>
23#include <linux/compiler.h>
24#include <linux/audit.h>
25
26#include "tick-internal.h"
27#include "ntp_internal.h"
28#include "timekeeping_internal.h"
29
/* Flags for timekeeping_update() describing what must be refreshed */
#define TK_CLEAR_NTP		(1 << 0)	/* reset ntp_error and NTP state */
#define TK_MIRROR		(1 << 1)	/* mirror tk_core into shadow_timekeeper */
#define TK_CLOCK_WAS_SET	(1 << 2)	/* a clock was set; bump clock_was_set_seq */

enum timekeeping_adv_mode {
	/* Update timekeeper when a tick has passed */
	TK_ADV_TICK,

	/* Update timekeeper on a direct frequency change */
	TK_ADV_FREQ
};
41
/* Serializes all writers of the timekeeper and the shadow copy */
DEFINE_RAW_SPINLOCK(timekeeper_lock);

/*
 * The seqcount and the timekeeper share a cache-line-aligned struct so a
 * reader can fetch the sequence and the data it guards together.
 */
static struct {
	seqcount_raw_spinlock_t	seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned = {
	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock),
};

/* Writers update this copy outside the seqcount write section, then it is
 * memcpy'd over tk_core.timekeeper (see TK_MIRROR in timekeeping_update()). */
static struct timekeeper shadow_timekeeper;
56
57
58
59
60
61
62
63
64
65
/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Latch sequence counter
 * @base:	Two read bases; readers pick one via the lsb of @seq
 */
struct tk_fast {
	seqcount_raw_spinlock_t	seq;
	struct tk_read_base	base[2];
};
70
71
/* Cycle value captured by halt_fast_timekeeper() at suspend time */
static u64 cycles_at_suspend;

/* Frozen readout: always returns the cycles captured at suspend, so the
 * fast timekeepers stop advancing while the real clocksource is halted. */
static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};
82
/*
 * The fast timekeepers start with the dummy clock installed so they are
 * callable before timekeeping_init() / the first update_fast_timekeeper().
 */
static struct tk_fast tk_fast_mono ____cacheline_aligned = {
	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_mono.seq, &timekeeper_lock),
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
	.seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_fast_raw.seq, &timekeeper_lock),
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

/* Flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
97
/*
 * Fold whole seconds out of the shifted xtime_nsec accumulators into
 * xtime_sec/raw_sec. xtime_nsec is kept left-shifted by the clocksource
 * shift, so one second is (NSEC_PER_SEC << shift).
 */
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}
109
110static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
111{
112 struct timespec64 ts;
113
114 ts.tv_sec = tk->xtime_sec;
115 ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
116 return ts;
117}
118
/* Set CLOCK_REALTIME in @tk; the nsec part is stored left-shifted by the
 * clocksource shift, matching the accumulator format. */
static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}
124
/* Add @ts to CLOCK_REALTIME in @tk and renormalize the shifted nsec field */
static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}
131
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offs_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	/* offs_tai tracks offs_real plus the current TAI-UTC offset */
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}
148
/* Account @delta (time slept) into the boot offset */
static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
	/*
	 * Timespec representation of offs_boot, kept so consumers of
	 * monotonic_to_boot don't need a 64bit division per readout.
	 */
	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
}
158
159
160
161
162
163
164
165
166
167
168
169
170
171
/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * The seqcount only guarantees that a torn read is retried, not that the
 * read itself is safe: tkr->clock may change between loading the pointer
 * and invoking ->read(). READ_ONCE() pins one clock and passes that same
 * pointer to its own read function, so we never call one clocksource's
 * read() with another clocksource's argument.
 */
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}
178
179#ifdef CONFIG_DEBUG_TIMEKEEPING
/* Rate limit for the underflow/overflow warnings below (jiffies) */
#define WARNING_FREQ (HZ*300)

/*
 * Sanity-check a cycle @offset against the clocksource's max_cycles and
 * report (rate limited) any underflow/overflow observed by the debug
 * timekeeping_get_delta() since the last check.
 */
static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}
220
/* Debug variant: like the plain version but detects and clamps bogus deltas */
static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqcount, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqcount here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
261#else
/* No sanity checking without CONFIG_DEBUG_TIMEKEEPING */
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
265static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
266{
267 u64 cycle_now, delta;
268
269
270 cycle_now = tk_clock_read(tkr);
271
272
273 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
274
275 return delta;
276}
277#endif
278
279
280
281
282
283
284
285
286
287
288
289
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;		/* round to nearest */
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
	tk->skip_second_overflow = 0;
}
352
353
354
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
/* Legacy hook: arch code may install a function returning extra ns to add
 * to every readout (see timekeeping_delta_to_ns()); default adds nothing. */
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif
361
/* Convert a cycle @delta to nanoseconds, folding in the pending shifted
 * xtime_nsec remainder so sub-cycle precision isn't lost. */
static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in the extra arch-provided offset */
	return nsec + arch_gettimeoffset();
}
372
373static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
374{
375 u64 delta;
376
377 delta = timekeeping_get_delta(tkr);
378 return timekeeping_delta_to_ns(tkr, delta);
379}
380
381static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
382{
383 u64 delta;
384
385
386 delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
387 return timekeeping_delta_to_ns(tkr, delta);
388}
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to tk_fast which is updated
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself, so we employ the latch
 * technique: flip the sequence, update the copy readers just left,
 * flip again, then bring the other copy up to date. A reader (even an
 * NMI interrupting this update) always finds one consistent copy.
 */
static void update_fast_timekeeper(const struct tk_read_base *tkr,
				   struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
/*
 * Latch-based NMI safe readout: the lsb of the latch sequence selects
 * which of the two read bases is stable; retry if an update raced with us.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);	/* pick the stable copy */
		now = ktime_to_ns(tkr->base);

		/* Add cycles elapsed since the last timekeeping update */
		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}
474
/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 */
u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
480
/**
 * ktime_get_raw_fast_ns - Fast NMI safe access to clock monotonic raw
 */
u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * NOTE(review): tk->offs_boot is read here without seqcount protection, so
 * a reader racing with a boot-offset update may combine a fresh monotonic
 * value with a stale offset (or vice versa). Presumably an accepted
 * trade-off for NMI/tracing safety — confirm callers tolerate it.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
515
516
517
518
519
/*
 * Same latch-based readout as __ktime_get_fast_ns(), but starting from
 * base_real (monotonic base + offs_real) to yield CLOCK_REALTIME ns.
 */
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);	/* pick the stable copy */
		now = ktime_to_ns(tkr->base_real);

		/* Add cycles elapsed since the last timekeeping update */
		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}
540
541
542
543
/**
 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
	return __ktime_get_real_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
549
550
551
552
553
554
555
556
557
558
559
/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has
 * been suspended, so take a snapshot of the readout base of @tk and install
 * it with the dummy clock, which always returns the cycles captured in
 * cycles_at_suspend — the fast clocks thus appear frozen across suspend.
 */
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	const struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	tkr_dummy.base_real = tkr->base + tk->offs_real;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	/* Same treatment for the raw fast timekeeper */
	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
576
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

/* Notify pvclock gtod listeners of a timekeeping update */
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}
583
584
585
586
/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	/* Push the current state to the new listener immediately */
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
601
602
603
604
605
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
618
619
620
621
/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}
629
630
631
632
/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}
662
663
/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	/* Refresh the NMI safe fast timekeepers from the new state */
	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}
692
693
694
695
696
697
698
699
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in the extra arch-provided offset */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;


	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in the extra arch-provided offset */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}
722
723
724
725
726
727
728
/**
 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void ktime_get_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);
749
/* Returns CLOCK_MONOTONIC: the precomputed base plus ns elapsed since the
 * last timekeeping update, read consistently under the seqcount. */
ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
769
/* Resolution of the monotonic clock in ns: the ns value of one clocksource
 * cycle, i.e. mult >> shift. */
u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
786
/* Offsets added to the monotonic base by ktime_get_with_offset() & friends */
static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};
792
/* Returns CLOCK_MONOTONIC plus the selected offset (real/boot/tai) */
ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);
813
/*
 * Coarse variant of ktime_get_with_offset(): no clocksource read — uses the
 * xtime_nsec accumulated at the last timekeeping update, so resolution is
 * limited to that of the update frequency.
 */
ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
833
834
835
836
837
838
/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned int seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
853
854
855
856
/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);
874
875
876
877
878
879
880
881
882
/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);
905
906
907
908
909
910
911
912
913
914
/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read: tk->ktime_sec is maintained by tk_update_ktime_data()
 * at every timekeeping update.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);
923
924
925
926
927
928
929
930
931
932
933
934
/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970.
 *
 * For 64bit systems the fast access to tk->xtime_sec is not problematic as
 * it is a 64bit word so single fetch is fine. On 32bit systems the access
 * must be protected with the sequence counter to provide "atomic" access to
 * the 64bit tk->xtime_sec value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
953
954
955
956
957
958
/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds,
 * but without the sequence counter protection. NOTE(review): presumably
 * only safe for callers that already hold the timekeeping locks or can
 * tolerate a torn read — confirm against call sites.
 */
noinstr time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
965
966
967
968
969
/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 *
 * All three values are captured from one consistent seqcount read section,
 * together with the change sequence numbers needed by the crosststamp code.
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(&tk->tkr_mono);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
999
1000
/*
 * Scale *base = (*base * mult) / div. Returns -EOVERFLOW (leaving *base
 * untouched) if the multiplication would not fit in 64 bits.
 */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	/* Reject if either quotient or remainder can't absorb mult's bits */
	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;

	rem = div64_u64(rem * mult, div);
	*base = tmp + rem;
	return 0;
}
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
/*
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 *
 * Interpolates the cross-timestamp within the history interval using
 *	correction = (interval end - interval start) * partial / total
 * applied from whichever end of the interval is closer to the sample.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 u64 partial_history_cycles,
					 u64 total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles / 2;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta spanning the history interval
	 * by the fraction of the interval we are interpolating over.
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there was a clock discontinuity (clock_was_set_seq changed),
	 * the realtime history can't be interpolated directly; derive the
	 * realtime correction from the raw one via the mult ratio instead.
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}
1095
1096
1097
1098
1099static bool cycle_between(u64 before, u64 test, u64 after)
1100{
1101 if (test > before && test < after)
1102 return true;
1103 if (test < before && before > after)
1104 return true;
1105 return false;
1106}
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *	system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *	time when counter provided by the driver is before the current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time.
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	u64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned int seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 * If not, fall back to interpolation from interval_start.
		 */
		now = tk_clock_read(&tk->tkr_mono);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		u64 partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value is not before the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
1219
1220
1221
1222
1223
1224
1225
/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP state and notifies
 * clock listeners via clock_was_set().
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_settod(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/* Accumulate elapsed time before changing the clock */
	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	/* Reject a setting that would make wall_to_monotonic go positive */
	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	if (!ret)
		audit_tk_injoffset(ts_delta);

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);
1268
1269
1270
1271
1272
1273
1274
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		Pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
static int timekeeping_inject_offset(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 tmp;
	int ret = 0;

	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/* Accumulate elapsed time before changing the clock */
	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), *ts);
	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
	    !timespec64_valid_settod(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
1312
1313
1314
1315
1316
/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
/*
 * If the persistent clock runs in local time (sys_tz.tz_minuteswest != 0),
 * warp the system clock by the timezone offset once and remember that the
 * persistent clock is local via persistent_clock_is_local.
 */
void timekeeping_warp_clock(void)
{
	if (sys_tz.tz_minuteswest != 0) {
		struct timespec64 adjust;

		persistent_clock_is_local = 1;
		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
		adjust.tv_nsec = 0;
		timekeeping_inject_offset(&adjust);
	}
}
1346
1347
1348
1349
1350
/*
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 * NOTE(review): no locking here — presumably callers hold the timekeeper
 * seqcount write side; confirm at the call sites.
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}
1356
1357
1358
1359
1360
1361
/*
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource.
 * Runs via stop_machine() (see timekeeping_notify()).
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}
1396
1397
1398
1399
1400
1401
1402
1403
/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * Swaps in @clock via stop_machine() and returns 0 when it is now the
 * active timekeeping clocksource, -1 when the swap did not take effect.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}
1414
1415
1416
1417
1418
1419
1420
/**
 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void ktime_get_raw_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->raw_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_raw_ts64);
1438
1439
1440
1441
1442
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}
1458
1459
1460
1461
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487void __weak read_persistent_clock64(struct timespec64 *ts)
1488{
1489 ts->tv_sec = 0;
1490 ts->tv_nsec = 0;
1491}
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
/*
 * read_persistent_wall_and_boot_offset - Weak default that derives the two
 * boot-time values from what is available:
 * @wall_time:	persistent clock value (zero if none exists)
 * @boot_offset:	time spent before timekeeping starts, taken from the
 *		architecture's local_clock()
 */
void __weak __init
read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
				     struct timespec64 *boot_offset)
{
	read_persistent_clock64(wall_time);
	*boot_offset = ns_to_timespec64(local_clock());
}
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
/* Flag set on suspend; cleared once sleep time has been injected on resume */
static bool suspend_timing_needed;

/* Flag for whether this platform has a usable persistent clock */
static bool persistent_clock_exists;
1530
1531
1532
1533
1534void __init timekeeping_init(void)
1535{
1536 struct timespec64 wall_time, boot_offset, wall_to_mono;
1537 struct timekeeper *tk = &tk_core.timekeeper;
1538 struct clocksource *clock;
1539 unsigned long flags;
1540
1541 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
1542 if (timespec64_valid_settod(&wall_time) &&
1543 timespec64_to_ns(&wall_time) > 0) {
1544 persistent_clock_exists = true;
1545 } else if (timespec64_to_ns(&wall_time) != 0) {
1546 pr_warn("Persistent clock returned invalid value");
1547 wall_time = (struct timespec64){0};
1548 }
1549
1550 if (timespec64_compare(&wall_time, &boot_offset) < 0)
1551 boot_offset = (struct timespec64){0};
1552
1553
1554
1555
1556
1557 wall_to_mono = timespec64_sub(boot_offset, wall_time);
1558
1559 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1560 write_seqcount_begin(&tk_core.seq);
1561 ntp_init();
1562
1563 clock = clocksource_default_clock();
1564 if (clock->enable)
1565 clock->enable(clock);
1566 tk_setup_internals(tk, clock);
1567
1568 tk_set_xtime(tk, &wall_time);
1569 tk->raw_sec = 0;
1570
1571 tk_set_wall_to_mono(tk, wall_to_mono);
1572
1573 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1574
1575 write_seqcount_end(&tk_core.seq);
1576 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1577}
1578
1579
1580static struct timespec64 timekeeping_suspend_time;
1581
1582
1583
1584
1585
1586
1587
1588
/*
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @tk:		the timekeeper to be updated
 * @delta:	sleep interval to account
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables. Both callers in
 * this file hold timekeeper_lock and the tk_core.seq write side.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   const struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	/* Advance wall time; pull wall_to_monotonic back by the same amount */
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}
1603
1604#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
/*
 * timekeeping_rtc_skipresume - report whether RTC resume handling is needed
 *
 * Returns true when the suspend interval has already been accounted
 * (suspend_timing_needed was cleared when sleep time was injected), so
 * the RTC-based resume path should not inject it a second time.
 */
bool timekeeping_rtc_skipresume(void)
{
	return !suspend_timing_needed;
}
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
/*
 * timekeeping_rtc_skipsuspend - report whether RTC suspend handling is needed
 *
 * Returns true when a persistent clock exists; in that case the core
 * timekeeping code records the suspend time itself and the RTC-based
 * bookkeeping can be skipped.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
/**
 * timekeeping_inject_sleeptime64 - Adds a suspend interval to the timekeeper
 * @delta: pointer to a timespec64 delta value
 *
 * Accounts @delta as time spent suspended: advances wall time, adjusts
 * wall_to_monotonic and the sleep-time accounting, then notifies the
 * rest of the system via clock_was_set(). NOTE(review): presumably
 * called from the RTC resume path (built only with
 * CONFIG_RTC_HCTOSYS_DEVICE) — confirm at callers.
 */
void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/* The suspend interval is now accounted; resume must not redo it */
	suspend_timing_needed = false;

	/* Accumulate time up to now before shifting the clock */
	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* Signal hrtimers about time change */
	clock_was_set();
}
1673#endif
1674
1675
1676
1677
/*
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	u64 cycle_now, nsec;
	bool inject_sleeptime = false;

	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After the system resumes, the suspended interval must be
	 * determined and compensated for. Two sources are tried here, in
	 * order of preference:
	 *
	 *   1. a clocksource that kept running across suspend
	 *      (clocksource_stop_suspend_timing() returns a nonzero nsec),
	 *   2. the persistent clock, if it advanced past the value saved
	 *      at suspend time.
	 *
	 * If neither yields a delta, no sleep time is injected here;
	 * suspend_timing_needed stays set so a later injection (via
	 * timekeeping_inject_sleeptime64()) can still account it.
	 */
	cycle_now = tk_clock_read(&tk->tkr_mono);
	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
	if (nsec > 0) {
		ts_delta = ns_to_timespec64(nsec);
		inject_sleeptime = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		inject_sleeptime = true;
	}

	if (inject_sleeptime) {
		suspend_timing_needed = false;
		__timekeeping_inject_sleeptime(tk, &ts_delta);
	}

	/* Re-base cycle_last so the suspend gap is not counted as a delta */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}
1737
/*
 * timekeeping_suspend - Halt timekeeping for system suspend.
 *
 * Records the persistent clock value and the current cycle counter so
 * the suspended interval can be reconstructed on resume, then suspends
 * tick/clocksource/clockevents. Always returns 0 (syscore suspend hook).
 */
int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 delta, delta_delta;
	static struct timespec64 old_delta;
	struct clocksource *curr_clock;
	u64 cycle_now;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent clock cannot be detected at
	 * timekeeping_init() time, so if it returns a valid (nonzero)
	 * value here, record that it exists after all.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	suspend_timing_needed = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * Since timekeeping_forward_now() just ran, cycle_last holds the
	 * value just read from the clocksource; hand it to the suspend
	 * timing machinery as the suspend-entry timestamp.
	 */
	curr_clock = tk->tkr_mono.clock;
	cycle_now = tk->tkr_mono.cycle_last;
	clocksource_start_suspend_timing(curr_clock, cycle_now);

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes (each
		 * of which can contribute sub-second error), try to keep
		 * the difference between system time and persistent clock
		 * time close to constant across cycles.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * The difference jumped: assume time was corrected
			 * and restart tracking from the current delta.
			 */
			old_delta = delta;
		} else {
			/* Fold the small residual into the suspend time */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}
1806
1807
/* Hook timekeeping into the system core suspend/resume sequence */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume = timekeeping_resume,
	.suspend = timekeeping_suspend,
};

/* Register the syscore ops at device_initcall time */
static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);
1819
1820
1821
1822
/*
 * timekeeping_apply_adjustment - Apply a signed adjustment to tkr_mono.mult
 * @tk:		the timekeeper being adjusted
 * @offset:	cycles not yet accumulated this tick
 * @mult_adj:	adjustment to add to the multiplier
 *
 * When mult changes, the nanoseconds that the already-elapsed (but not
 * yet accumulated) @offset cycles produce changes too. To keep readout
 * continuous, xtime_nsec is compensated by offset * mult_adj and
 * xtime_interval by cycle_interval * mult_adj.
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 s32 mult_adj)
{
	s64 interval = tk->cycle_interval;

	/* Fast-path the common small adjustments; avoid multiplies */
	if (mult_adj == 0) {
		return;
	} else if (mult_adj == -1) {
		interval = -interval;
		offset = -offset;
	} else if (mult_adj != 1) {
		interval *= mult_adj;
		offset *= mult_adj;
	}

	/*
	 * Refuse an upward adjustment that would wrap the u32 multiplier
	 * (tkr_mono.mult + mult_adj < mult_adj detects unsigned overflow).
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
}
1896
1897
1898
1899
1900
/*
 * timekeeping_adjust - Adjust the multiplier to reduce the NTP error
 * @tk:		the timekeeper being adjusted
 * @offset:	cycles not yet accumulated this tick
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	u32 mult;

	/*
	 * Determine the base multiplier from the current NTP tick length.
	 * Skip the expensive div64_u64() when the tick length has not
	 * changed since last time.
	 */
	if (likely(tk->ntp_tick == ntp_tick_length())) {
		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
	} else {
		tk->ntp_tick = ntp_tick_length();
		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
				 tk->xtime_remainder, tk->cycle_interval);
	}

	/*
	 * While ntp_error is positive, add a +1 correction to the
	 * multiplier to converge toward NTP time; drop it again once
	 * the error is gone.
	 */
	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
	mult += tk->ntp_err_mult;

	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);

	/* Warn once if we drift further from the clocksource mult than maxadj */
	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * The adjustment may have driven xtime_nsec negative. Borrow a
	 * full (shifted) second from xtime_sec and flag that the matching
	 * second_overflow() must be skipped when that second is
	 * re-accumulated (see accumulate_nsecs_to_secs()).
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
							tk->tkr_mono.shift;
		tk->xtime_sec--;
		tk->skip_second_overflow = 1;
	}
}
1954
1955
1956
1957
1958
1959
1960
1961
1962
/*
 * accumulate_nsecs_to_secs - Accumulate whole seconds out of xtime_nsec
 * @tk:		the timekeeper being updated
 *
 * Moves full (shifted) seconds from tkr_mono.xtime_nsec into xtime_sec,
 * running NTP second_overflow() processing for each one and inserting
 * leap seconds as requested by NTP.
 *
 * Returns TK_CLOCK_WAS_SET if a leap second was inserted, 0 otherwise.
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/*
		 * This second was already processed before being borrowed
		 * back in timekeeping_adjust(); skip its second_overflow().
		 */
		if (unlikely(tk->skip_second_overflow)) {
			tk->skip_second_overflow = 0;
			continue;
		}

		/* Figure out if it's a leap sec or business as usual */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			/* Move wall_to_monotonic the opposite way */
			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
/*
 * logarithmic_accumulation - shifted accumulation of cycles
 * @tk:		the timekeeper being updated
 * @offset:	cycles available to accumulate
 * @shift:	log2 of the number of cycle_intervals in one chunk
 * @clock_set:	OR'ed with TK_CLOCK_WAS_SET when a leap second occurs
 *
 * Accumulates one cycle_interval << shift chunk into the mono, raw and
 * NTP-error state, provided at least that many cycles are available.
 *
 * Returns the number of unconsumed cycles.
 */
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
				    u32 shift, unsigned int *clock_set)
{
	u64 interval = tk->cycle_interval << shift;
	u64 snsec_per_sec;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time, normalizing full seconds into raw_sec */
	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
		tk->raw_sec++;
	}

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
2045
2046
2047
2048
2049
/*
 * timekeeping_advance - Updates the timekeeper to the current time and
 * current NTP tick length, working on the shadow timekeeper and copying
 * the result back under the seqcount write side.
 */
static void timekeeping_advance(enum timekeeping_adv_mode mode)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	u64 offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;

	if (mode != TK_ADV_TICK)
		goto out;
#else
	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
		goto out;
#endif

	/* Do some additional sanity checking */
	timekeeping_check_update(tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* Adjust the multiplier to correct NTP error */
	timekeeping_adjust(tk, offset);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last inside the seqcount. Do not add code here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since we're in irq context */
		clock_was_set_delayed();
}
2132
2133
2134
2135
2136
/*
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
void update_wall_time(void)
{
	timekeeping_advance(TK_ADV_TICK);
}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Computed as offs_real - offs_boot. NOTE(review): this reads the two
 * offsets without the tk_core.seq loop used by the other accessors —
 * presumably a torn read is acceptable for this interface; confirm.
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);
2161
/*
 * ktime_get_coarse_real_ts64 - Fill @ts with the wall time as of the last
 * timekeeper update (tick granularity); does not read the clocksource.
 */
void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));
}
EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
2174
2175void ktime_get_coarse_ts64(struct timespec64 *ts)
2176{
2177 struct timekeeper *tk = &tk_core.timekeeper;
2178 struct timespec64 now, mono;
2179 unsigned int seq;
2180
2181 do {
2182 seq = read_seqcount_begin(&tk_core.seq);
2183
2184 now = tk_xtime(tk);
2185 mono = tk->wall_to_monotonic;
2186 } while (read_seqcount_retry(&tk_core.seq, seq));
2187
2188 set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
2189 now.tv_nsec + mono.tv_nsec);
2190}
2191EXPORT_SYMBOL(ktime_get_coarse_ts64);
2192
2193
2194
2195
/*
 * do_timer - Advance jiffies by @ticks and update the load average.
 * The caller in this file (xtime_update()) holds jiffies_lock and the
 * jiffies_seq write side around this call.
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load();
}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock-was-set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets only if the
 * sequence count in @cwsseq differs from timekeeper.clock_was_set_seq,
 * so unchanged offsets are not rewritten on every call.
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base >= tk->next_leap_ktime))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}
2245
2246
2247
2248
/*
 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
 * @txc:	the timex request to validate
 *
 * Checks capability (CAP_SYS_TIME) and value ranges for the requested
 * modes. Returns 0 if the request may proceed, -EINVAL or -EPERM
 * otherwise.
 */
static int timekeeping_validate_timex(const struct __kernel_timex *txc)
{
	if (txc->modes & ADJ_ADJTIME) {
		/* singleshot must not be used with any other mode bits */
		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
			return -EINVAL;
		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
		    !capable(CAP_SYS_TIME))
			return -EPERM;
	} else {
		/* In order to modify anything, you gotta be super-user! */
		if (txc->modes && !capable(CAP_SYS_TIME))
			return -EPERM;
		/*
		 * if the quartz is off by more than 10% then
		 * something is VERY wrong!
		 */
		if (txc->modes & ADJ_TICK &&
		    (txc->tick <  900000/USER_HZ ||
		     txc->tick > 1100000/USER_HZ))
			return -EINVAL;
	}

	if (txc->modes & ADJ_SETOFFSET) {
		/* In order to inject time, you gotta be super-user! */
		if (!capable(CAP_SYS_TIME))
			return -EPERM;

		/*
		 * The injected offset's seconds part may be negative, so
		 * only tv_usec/tv_nsec is range checked: it must be
		 * non-negative and below one second in the relevant unit.
		 */
		if (txc->time.tv_usec < 0)
			return -EINVAL;

		if (txc->modes & ADJ_NANO) {
			if (txc->time.tv_usec >= NSEC_PER_SEC)
				return -EINVAL;
		} else {
			if (txc->time.tv_usec >= USEC_PER_SEC)
				return -EINVAL;
		}
	}

	/*
	 * Check for potential multiplication overflows that can
	 * only happen on 64-bit systems:
	 */
	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
		if (LLONG_MIN / PPM_SCALE > txc->freq)
			return -EINVAL;
		if (LLONG_MAX / PPM_SCALE < txc->freq)
			return -EINVAL;
	}

	return 0;
}
2310
2311
2312
2313
2314
/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 * @txc:	timex request, updated in place with the results
 *
 * Validates the request, applies an ADJ_SETOFFSET time injection if
 * asked for, runs the NTP state machine under timekeeper_lock, and
 * notifies dependent machinery (audit, hrtimers, CMOS update).
 *
 * Returns the __do_adjtimex() result, or a negative errno from
 * validation/injection.
 */
int do_adjtimex(struct __kernel_timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct audit_ntp_data ad;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = timekeeping_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec64 delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		/* Without ADJ_NANO the offset is in microseconds; scale up */
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;

		audit_tk_injoffset(delta);
	}

	audit_ntp_init(&ad);

	ktime_get_real_ts64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai, &ad);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	audit_ntp_log(&ad);

	/* Update the multiplier immediately if frequency was set directly */
	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
		timekeeping_advance(TK_ADV_FREQ);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
2374
2375#ifdef CONFIG_NTP_PPS
2376
2377
2378
/**
 * hardpps() - Accessor function to NTP __hardpps function
 * @phase_ts:	timespec64 containing the current phase
 * @raw_ts:	timespec64 containing the raw monotonic time
 *
 * Serializes PPS signal processing against timekeeping updates by
 * taking timekeeper_lock and the tk_core.seq write side.
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
2392#endif
2393
2394
2395
2396
2397
2398
2399
/*
 * xtime_update - Advance jiffies by @ticks and accumulate wall time.
 *
 * The jiffies update is protected by jiffies_lock/jiffies_seq; wall
 * time accumulation takes its own locking inside update_wall_time().
 */
void xtime_update(unsigned long ticks)
{
	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	do_timer(ticks);
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}
2409