/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions.
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

enum timekeeping_adv_mode {
	/* Update timekeeper when a tick has passed */
	TK_ADV_TICK,

	/* Update timekeeper on a direct frequency change */
	TK_ADV_FREQ
};

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() for the rationale behind the data
 * structure.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

/* Suspend-time cycles value for halted fast timekeeper */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}

static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * This helper is necessary to use in the read paths because, while the
 * seqcount ensures we don't return a bad value while structures are
 * updated, it doesn't protect from potential crashes. There is the
 * possibility that the current clocksource could be changed between
 * reading the structure and calling clock->read() without barriers in
 * between. If the clocksource structure changes while we are in the
 * read path, the clock->read() call could access freed memory.
 *
 * The READ_ONCE() below ensures the structure read and the clock read
 * use the same clocksource pointer.
 */
static inline u64 tk_clock_read(const struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{

	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqcount, the data may shift
	 * under us while we're doing the calculation. Retry until we
	 * get a consistent snapshot of now/last/mask/max and only
	 * then compute and sanity-check the delta.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(const struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These value will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
	tk->skip_second_overflow = 0;
}
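
/*
 * Worked example for the interval math above (illustrative only, the
 * numbers are hypothetical): assume a clocksource with mult = 4194304
 * and shift = 22, i.e. one cycle ~= 1 ns, and NTP_INTERVAL_LENGTH =
 * 1000000 ns (HZ=1000). Then:
 *
 *	ntpinterval     = 1000000 << 22           (tick length, shifted ns)
 *	cycle_interval  = (ntpinterval + mult/2) / mult ~= 1000000 cycles
 *	xtime_interval  = cycle_interval * mult   (shifted ns per tick)
 *	xtime_remainder = ntpinterval - xtime_interval
 *
 * xtime_remainder captures the rounding error of the ns -> cycles
 * division and is fed back through the NTP error accounting below so
 * that no time is lost to the rounding.
 */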

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(const struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline u64 timekeeping_get_ns(const struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to tk_fast which is updated
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if a NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(const struct tk_read_base *tkr,
				   struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
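
/*
 * Illustrative sketch of the latch pairing (not itself part of this
 * file's logic): a reader picks the array slot from the low bit of the
 * sequence count and retries if an update raced with it:
 *
 *	do {
 *		seq = raw_read_seqcount_latch(&tkf->seq);
 *		tkr = tkf->base + (seq & 0x01);
 *		snapshot = *tkr;
 *	} while (read_seqcount_retry(&tkf->seq, seq));
 *
 * Because each raw_write_seqcount_latch() above flips the low bit,
 * readers are always steered to the copy that is not currently being
 * written. __ktime_get_fast_ns() below is the real instance of this
 * reader pattern.
 */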

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope.
 *
 * tmono
 * ^
 * |    o  n
 * |   o n
 * |  u
 * | o
 * |o
 * |12345678---> reader order
 *
 * o = old slope
 * u = update
 * n = new slope
 *
 * So reader 6 will observe time going backwards versus reader 5.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
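
/*
 * Example use (illustrative sketch): the fast accessors exist for
 * contexts where taking tk_core.seq is not safe, e.g. tracing or NMI
 * handlers. A hypothetical NMI handler could stamp events like this:
 *
 *	static void my_nmi_handler(void)	// hypothetical
 *	{
 *		u64 t = ktime_get_mono_fast_ns();
 *
 *		trace_printk("nmi at %llu ns\n", t);
 *	}
 *
 * Unlike ktime_get(), the returned value may step slightly across a
 * timekeeper update and is not guaranteed monotonic; see the comment
 * above __ktime_get_fast_ns().
 */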

/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqcounts. This has the following minor side effects:
 *
 * (1) Its possible that a timestamp be taken after the boot offset is updated
 * but before the timekeeper is updated. If this happens, the new boot offset
 * is added to the old timekeeping making the clock appear to update slightly
 * earlier:
 *    CPU 0                                        CPU 1
 *    timekeeping_inject_sleeptime64()
 *    __timekeeping_inject_sleeptime(tk, delta);
 *                                                 timestamp();
 *    timekeeping_update(tk, TK_CLEAR_NTP...);
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated.  Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);

/*
 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
 */
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base_real);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

/**
 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
	return __ktime_get_real_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(const struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	const struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	tkr_dummy.base_real = tkr->base + tk->offs_real;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater than a second, in which
	 * case the seconds value computed above is one short.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	/* Accumulate raw time */
	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}

/**
 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void ktime_get_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
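
/*
 * Example use (illustrative sketch): measuring an elapsed interval
 * with the monotonic clock from process context:
 *
 *	ktime_t start, delta;
 *
 *	start = ktime_get();
 *	do_work();			// hypothetical workload
 *	delta = ktime_sub(ktime_get(), start);
 *	pr_info("took %lld ns\n", ktime_to_ns(delta));
 *
 * CLOCK_MONOTONIC never jumps on settimeofday() and is therefore the
 * right clock for durations; CLOCK_REALTIME (ktime_get_real_ts64())
 * is the right clock for timestamps that must match the wall clock.
 */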

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);

}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;

}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
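
/*
 * Example use (illustrative sketch): converting a stored monotonic
 * timestamp into another clock domain, e.g. to compare it against
 * boottime stamps that keep counting across suspend:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 *	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);
 *
 * The conversion only adds the respective offset under the seqcount,
 * so it is cheap, but the result reflects the offset at conversion
 * time, not at the time @mono was taken.
 */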

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is of course
 * protected by the seqcount, but on 32bit systems a lockless read
 * could observe a torn 64bit value, so the read side seqcount loop
 * is required there.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is only called when the timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}

/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(&tk->tkr_mono);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);
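
/*
 * Example use (illustrative sketch): a PTP-style driver taking a
 * history snapshot before talking to its device, so that a cross
 * timestamp can later be interpolated:
 *
 *	struct system_time_snapshot history;
 *
 *	ktime_get_snapshot(&history);
 *	// ... exchange timestamps with the device (hypothetical) ...
 *	// then pass &history to get_device_system_crosststamp()
 *
 * The cs_was_changed_seq/clock_was_set_seq members let the consumer
 * detect clocksource changes and clock steps over that history window.
 */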

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}
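
/*
 * Worked example (illustrative numbers): scaling base = 10^12 by
 * mult/div = 1/3 computes
 *
 *	tmp = 10^12 / 3 = 333333333333, rem = 1
 *	tmp * mult = 333333333333, (rem * mult) / div = 0
 *	*base = 333333333333
 *
 * Splitting into quotient and remainder before multiplying keeps the
 * intermediate products within 64 bits whenever the fls64() checks
 * above pass; otherwise -EOVERFLOW tells the caller to give up.
 */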

/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set on history period
 * @ts:				Cross timestamp that should be adjusted using
 *				partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 u64 partial_history_cycles,
					 u64 total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles / 2;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale monotonic raw
	 *	correction by:
	 *	mult(real)/mult(raw) yielding the realtime correction
	 * Otherwise, calculate the realtime correction similar to monotonic
	 *	raw calculation
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}

/*
 * cycle_between - true if test occurs chronologically between before and after
 */
static bool cycle_between(u64 before, u64 test, u64 after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}

/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *			system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *			time when counter provided by the driver is before the
 *			current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	u64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk_clock_read(&tk->tkr_mono);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		u64 partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
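
/*
 * Example use (illustrative sketch): a hypothetical NIC driver whose
 * hardware can latch its own clock and the system counter atomically:
 *
 *	static int my_get_time(ktime_t *dev, struct system_counterval_t *sys,
 *			       void *ctx)		// hypothetical
 *	{
 *		struct my_nic *nic = ctx;
 *
 *		*dev = my_nic_read_phc(nic);		// device time
 *		*sys = my_nic_read_syscounter(nic);	// .cycles and .cs
 *		return 0;
 *	}
 *
 *	struct system_device_crosststamp xt;
 *	int err = get_device_system_crosststamp(my_get_time, nic, NULL, &xt);
 *
 * On success xt.device, xt.sys_realtime and xt.sys_monoraw describe the
 * same instant. Passing a prior ktime_get_snapshot() instead of NULL
 * allows counter values older than the current tick interval.
 */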

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
static int timekeeping_inject_offset(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 tmp;
	int ret = 0;

	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), *ts);
	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours)  or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *						- TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
void timekeeping_warp_clock(void)
{
	if (sys_tz.tz_minuteswest != 0) {
		struct timespec64 adjust;

		persistent_clock_is_local = 1;
		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
		adjust.tv_nsec = 0;
		timekeeping_inject_offset(&adjust);
	}
}

/**
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void ktime_get_raw_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->raw_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_raw_ts64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
 *                                        from the boot.
 *
 * Weak dummy function for arches that do not yet support it.
 * wall_time	- current time as returned by persistent clock
 * boot_offset	- offset that is defined as wall_time - boot_time
 * The default function calculates offset based on the current value of
 * local_clock(). This way architectures that support sched_clock() but don't
 * support dedicated boot time clock will provide the best estimate of the
 * boot time.
 */
void __weak __init
read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
				     struct timespec64 *boot_offset)
{
	read_persistent_clock64(wall_time);
	*boot_offset = ns_to_timespec64(local_clock());
}

/*
 * Flag reflecting whether timekeeping_resume() has injected sleeptime.
 *
 * The flag starts off false and is only set when a suspend reaches
 * timekeeping_suspend(); timekeeping_resume() sets it to false when the
 * timekeeper clocksource is not stopping across suspend and has been
 * used to update sleep time. If the timekeeper clocksource has stopped
 * then the flag stays true and is used by the RTC resume code to decide
 * whether sleeptime must be injected, and if so the flag gets set to
 * false then.
 *
 * If a suspend fails before reaching timekeeping_resume() then the flag
 * stays false and prevents erroneous sleeptime injection.
 */
static bool suspend_timing_needed;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timespec64 wall_time, boot_offset, wall_to_mono;
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;

	read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
	if (timespec64_valid_strict(&wall_time) &&
	    timespec64_to_ns(&wall_time) > 0) {
		persistent_clock_exists = true;
	} else if (timespec64_to_ns(&wall_time) != 0) {
		pr_warn("Persistent clock returned invalid value");
		wall_time = (struct timespec64){0};
	}

	if (timespec64_compare(&wall_time, &boot_offset) < 0)
		boot_offset = (struct timespec64){0};

	/*
	 * We want set wall_to_mono, so the following is true:
	 * wall time + wall_to_mono = boot time
	 */
	wall_to_mono = timespec64_sub(boot_offset, wall_time);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &wall_time);
	tk->raw_sec = 0;

	tk_set_wall_to_mono(tk, wall_to_mono);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @tk:		Pointer to the timekeeper to be updated
 * @delta:	Pointer to the delta value in timespec64 format
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   const struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume() means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return !suspend_timing_needed;
}

/**
 * timekeeping_rtc_skipsuspend - Indicate whether rtc_suspend() can be
 * skipped. When a persistent clock is available, timekeeping records
 * the suspend time itself in timekeeping_suspend(), so the RTC layer
 * does not need to.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(const struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	suspend_timing_needed = false;

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif
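
/*
 * Illustrative sketch (assumption: this mirrors how the RTC resume
 * path uses the hooks above, details simplified): rtc_resume()
 * computes the sleep length from RTC readings taken around suspend
 * and injects it only when timekeeping could not measure it itself:
 *
 *	if (timekeeping_rtc_skipresume())
 *		return;			// sleeptime already injected
 *
 *	sleep = timespec64_sub(rtc_now, rtc_at_suspend);
 *	if (sleep.tv_sec >= 0)
 *		timekeeping_inject_sleeptime64(&sleep);
 */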

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	u64 cycle_now, nsec;
	bool inject_sleeptime = false;

	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After the system resumes, we need to calculate the suspended
	 * time and compensate the OS time for it. Three sources can
	 * provide the suspended time:
	 * 1. a nonstop clocksource which kept counting across suspend,
	 * 2. the persistent clock (read above into ts_new),
	 * 3. the RTC resume path (timekeeping_inject_sleeptime64()).
	 * The nonstop clocksource is preferred as it has the best
	 * resolution; the persistent clock is the fallback, and the RTC
	 * path only runs when neither was able to provide the delta.
	 */
	cycle_now = tk_clock_read(&tk->tkr_mono);
	nsec = clocksource_stop_suspend_timing(clock, cycle_now);
	if (nsec > 0) {
		ts_delta = ns_to_timespec64(nsec);
		inject_sleeptime = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		inject_sleeptime = true;
	}

	if (inject_sleeptime) {
		suspend_timing_needed = false;
		__timekeeping_inject_sleeptime(tk, &ts_delta);
	}

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;
	struct clocksource *curr_clock;
	u64 cycle_now;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	suspend_timing_needed = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * Since we've called forward_now, cycle_last stores the value
	 * just read from the current clocksource. Save this to potentially
	 * use in suspend timing.
	 */
	curr_clock = tk->tkr_mono.clock;
	cycle_now = tk->tkr_mono.cycle_last;
	clocksource_start_suspend_timing(curr_clock, cycle_now);

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 s32 mult_adj)
{
	s64 interval = tk->cycle_interval;

	if (mult_adj == 0) {
		return;
	} else if (mult_adj == -1) {
		interval = -interval;
		offset = -offset;
	} else if (mult_adj != 1) {
		interval *= mult_adj;
		offset *= mult_adj;
	}

	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, lets assume mult_adj == 1 for now.
	 *
	 * When mult_adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval. This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * Its the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	u32 mult;

	/*
	 * Determine the multiplier from the current NTP tick length.
	 * Avoid expensive division when the tick length doesn't change.
	 */
	if (likely(tk->ntp_tick == ntp_tick_length())) {
		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
	} else {
		tk->ntp_tick = ntp_tick_length();
		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
				 tk->xtime_remainder, tk->cycle_interval);
	}

	/*
	 * If the clock is behind the NTP time, increase the multiplier
	 * by 1 to catch up with it; ntp_err_mult remembers whether that
	 * extra +1 is currently applied so it can be removed again once
	 * the accumulated error has been consumed.
	 */
	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
	mult += tk->ntp_err_mult;

	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we have already accumulated the second and the NTP
	 * subsystem has been notified via second_overflow(), we need to skip
	 * the next update.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
							tk->tkr_mono.shift;
		tk->xtime_sec--;
		tk->skip_second_overflow = 1;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/*
		 * Skip NTP update if this second was accumulated before,
		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
		 */
		if (unlikely(tk->skip_second_overflow)) {
			tk->skip_second_overflow = 0;
			continue;
		}

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
				    u32 shift, unsigned int *clock_set)
{
	u64 interval = tk->cycle_interval << shift;
	u64 snsec_per_sec;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
		tk->raw_sec++;
	}

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
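
/*
 * Worked example (illustrative numbers): assume cycle_interval is one
 * tick and a NO_HZ idle period left offset = 37 ticks unaccumulated.
 * timekeeping_advance() starts with shift = ilog2(37) - ilog2(1) = 5,
 * so the accumulation consumes:
 *
 *	32 ticks (shift 5), then 4 ticks (shift 2), then 1 tick (shift 0)
 *
 * Three accumulation steps (plus a few no-op passes while the shift
 * decays) replace 37 single-tick accumulations, which is what makes
 * the catch-up O(log n) in the size of the backlog.
 */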

/*
 * timekeeping_advance - Updates the timekeeper to the current time and
 * current NTP tick length
 */
static void timekeeping_advance(enum timekeeping_adv_mode mode)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	u64 offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;

	if (mode != TK_ADV_TICK)
		goto out;
#else
	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
		goto out;
#endif

	/* Do some additional sanity checking */
	timekeeping_check_update(tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* Correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	/* The memcpy must come last. Do not put anything here! */
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context*/
		clock_was_set_delayed();
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
void update_wall_time(void)
{
	timekeeping_advance(TK_ADV_TICK);
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));
}
EXPORT_SYMBOL(ktime_get_coarse_real_ts64);

void ktime_get_coarse_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);
}
EXPORT_SYMBOL(ktime_get_coarse_ts64);

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base >= tk->next_leap_ktime))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
 */
static int timekeeping_validate_timex(const struct timex *txc)
{
	if (txc->modes & ADJ_ADJTIME) {
		/* singleshot must not be used with any other mode bits */
		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
			return -EINVAL;
		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
		    !capable(CAP_SYS_TIME))
			return -EPERM;
	} else {
		/* In order to modify anything, you gotta be super-user! */
		if (txc->modes && !capable(CAP_SYS_TIME))
			return -EPERM;
		/*
		 * if the quartz is off by more than 10% then
		 * something is VERY wrong!
		 */
		if (txc->modes & ADJ_TICK &&
		    (txc->tick <  900000/USER_HZ ||
		     txc->tick > 1100000/USER_HZ))
			return -EINVAL;
	}

	if (txc->modes & ADJ_SETOFFSET) {
		/* In order to inject time, you gotta be super-user! */
		if (!capable(CAP_SYS_TIME))
			return -EPERM;

		/*
		 * Validate if a timespec/timeval used to inject a time
		 * offset is valid.  Offsets can be positive or negative, so
		 * we don't check tv_sec. The value of the timeval/timespec
		 * is the sum of its fields, but *NOTE*:
		 * The field tv_usec/tv_nsec must always be non-negative and
		 * we can't have more nanoseconds/microseconds than a second.
		 */
		if (txc->time.tv_usec < 0)
			return -EINVAL;

		if (txc->modes & ADJ_NANO) {
			if (txc->time.tv_usec >= NSEC_PER_SEC)
				return -EINVAL;
		} else {
			if (txc->time.tv_usec >= USEC_PER_SEC)
				return -EINVAL;
		}
	}

	/*
	 * Check for potential multiplication overflows that can
	 * only happen on 64-bit systems:
	 */
	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
		if (LLONG_MIN / PPM_SCALE > txc->freq)
			return -EINVAL;
		if (LLONG_MAX / PPM_SCALE < txc->freq)
			return -EINVAL;
	}

	return 0;
}

/**
 * do_adjtimex() - Accepts NTP time adjustments
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = timekeeping_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec64 delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	ktime_get_real_ts64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* Update the multiplier immediately if frequency was set directly */
	if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK))
		timekeeping_advance(TK_ADV_FREQ);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}
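
/*
 * Example use (illustrative sketch): the adjtimex() syscall path
 * funnels into do_adjtimex(). A frequency slew of +1 ppm would arrive
 * here roughly as:
 *
 *	struct timex txc = {
 *		.modes = ADJ_FREQUENCY,
 *		.freq  = 1 << 16,	// scaled ppm (2^-16 ppm units)
 *	};
 *	ret = do_adjtimex(&txc);
 *
 * Because ADJ_FREQUENCY is set, the tail of do_adjtimex() calls
 * timekeeping_advance(TK_ADV_FREQ) so the new tick length is applied
 * immediately instead of waiting for the next tick.
 */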

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled !
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}