/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)
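
/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */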
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
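
/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq
 *
 * See @update_fast_timekeeper() below.
 */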
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

/* Frozen cycle value returned by the dummy clocksource while suspended */
static u64 cycles_at_suspend;

static u64 dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

static struct clocksource dummy_clock = {
	.read = dummy_clock_read,
};

static struct tk_fast tk_fast_mono ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

static struct tk_fast tk_fast_raw ____cacheline_aligned = {
	.base[0] = { .clock = &dummy_clock, },
	.base[1] = { .clock = &dummy_clock, },
};

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
		tk->raw_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
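
/*
 * tk_clock_read - atomic clocksource read() helper
 *
 * The seqcount in the read paths only guarantees we don't return a bad
 * value while the structures are updated; it does not protect against a
 * concurrent clocksource change. Loading tkr->clock with READ_ONCE()
 * ensures both the pointer and the read() call use the same clocksource,
 * even while a change (which briefly installs a dummy clock) is in flight.
 */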
static inline u64 tk_clock_read(struct tk_read_base *tkr)
{
	struct clocksource *clock = READ_ONCE(tkr->clock);

	return clock->read(clock);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqcount, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nobody can see the funky results, since
	 * they'd just be thrown away. Snapshot everything inside our
	 * own seqcount loop so we work on a consistent set of values.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(tkr);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * negative values: relative to the mask, these look like huge
	 * deltas just below the mask.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
{
}
static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
{
	u64 cycle_now, delta;

	/* read clocksource */
	cycle_now = tk_clock_read(tkr);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif
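
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:		Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */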
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	u64 interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	++tk->cs_was_changed_seq;
	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (u64) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval = interval * clock->mult;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0) {
			tk->tkr_mono.xtime_nsec >>= -shift_change;
			tk->tkr_raw.xtime_nsec >>= -shift_change;
		} else {
			tk->tkr_mono.xtime_nsec <<= shift_change;
			tk->tkr_raw.xtime_nsec <<= shift_change;
		}
	}

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
	tk->skip_second_overflow = 0;
}

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr, u64 delta)
{
	u64 nsec;

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

static inline u64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	u64 delta;

	delta = timekeeping_get_delta(tkr);
	return timekeeping_delta_to_ns(tkr, delta);
}

static inline u64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, u64 cycles)
{
	u64 delta;

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
	return timekeeping_delta_to_ns(tkr, delta);
}
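
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code in the scheduler.
 *
 * Employ the latch technique; see @raw_write_seqcount_latch.
 *
 * So if an NMI hits the update of base[0] then it will use base[1]
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds).
 */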
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
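
/*
 * __ktime_get_fast_ns - Non-blocking, NMI safe clock readout
 *
 * The lowest bit of the latched sequence selects which tk_read_base
 * copy is currently consistent, so a reader which interrupts the
 * updater (even from NMI) simply uses the other copy. The price is
 * that timestamps taken across an update of the mult/base pair are
 * not guaranteed to be monotonic with respect to each other; callers
 * needing strict monotonicity should use ktime_get()/ktime_get_raw().
 */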
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
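
/**
 * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
 *
 * To keep it NMI safe since we're accessing from tracing, we're not using a
 * separate timekeeper with updates to monotonic clock and boot offset
 * protected with seqlocks. This has the following minor side effects:
 *
 * (1) It's possible that a timestamp is taken after the boot offset is
 * updated but before the timekeeper is updated. If this happens, the new
 * boot offset is added to the old timekeeping, making the clock appear to
 * update slightly earlier.
 *
 * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
 * partially updated. Since the tk->offs_boot update is a rare event, this
 * should be a rare occurrence which postprocessing should be able to handle.
 */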
u64 notrace ktime_get_boot_fast_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
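
/*
 * See comment for __ktime_get_fast_ns() vs. timestamp ordering
 */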
static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base_real);

		now += timekeeping_delta_to_ns(tkr,
				clocksource_delta(
					tk_clock_read(tkr),
					tkr->cycle_last,
					tkr->mask));
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

/**
 * ktime_get_real_fast_ns: - NMI safe and fast access to clock realtime.
 */
u64 ktime_get_real_fast_ns(void)
{
	return __ktime_get_real_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns);
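
/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */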
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tk_clock_read(tkr);
	tkr_dummy.clock = &dummy_clock;
	tkr_dummy.base_real = tkr->base + tk->offs_real;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.clock = &dummy_clock;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater/equal one second. Take
	 * this into account before updating tk->ktime_sec.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;

	/* Update the monotonic raw base */
	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}
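
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */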
static void timekeeping_forward_now(struct timekeeper *tk)
{
	u64 cycle_now, delta;

	cycle_now = tk_clock_read(&tk->tkr_mono);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;

	tk_normalize_xtime(tk);
}
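
/**
 * ktime_get_real_ts64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */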
void ktime_get_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_real_ts64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	u64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}
EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	unsigned int seq;
	u64 nsec;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

/**
 * __ktime_get_real_seconds - The same as ktime_get_real_seconds
 * but without the sequence counter protection. This internal function
 * is called just when timekeeping lock is already held.
 */
time64_t __ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
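
/**
 * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter
 * @systime_snapshot:	pointer to struct receiving the system time snapshot
 */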
void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	ktime_t base_raw;
	ktime_t base_real;
	u64 nsec_raw;
	u64 nsec_real;
	u64 now;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tk_clock_read(&tk->tkr_mono);
		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;
		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	systime_snapshot->cycles = now;
	systime_snapshot->real = ktime_add_ns(base_real, nsec_real);
	systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_snapshot);

/* Scale base by mult/div checking for overflow */
static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
{
	u64 tmp, rem;

	tmp = div64_u64_rem(*base, div, &rem);

	if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) ||
	    ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
		return -EOVERFLOW;
	tmp *= mult;
	rem *= mult;

	do_div(rem, div);
	*base = tmp + rem;
	return 0;
}
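
/**
 * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval
 * @history:			Snapshot representing start of history
 * @partial_history_cycles:	Cycle offset into history (fractional part)
 * @total_history_cycles:	Total history length in cycles
 * @discontinuity:		True indicates clock was set on history period
 * @ts:				Cross timestamp that should be adjusted using
 *	partial/total ratio
 *
 * Helper function used by get_device_system_crosststamp() to correct the
 * crosstimestamp corresponding to the start of the current interval to the
 * system counter value (timestamp point) provided by the driver. The
 * total_history_* quantities are the total history starting at the provided
 * reference point and ending at the start of the current interval. The cycle
 * count between the driver timestamp point and the start of the current
 * interval is partial_history_cycles.
 */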
static int adjust_historical_crosststamp(struct system_time_snapshot *history,
					 u64 partial_history_cycles,
					 u64 total_history_cycles,
					 bool discontinuity,
					 struct system_device_crosststamp *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 corr_raw, corr_real;
	bool interp_forward;
	int ret;

	if (total_history_cycles == 0 || partial_history_cycles == 0)
		return 0;

	/* Interpolate shortest distance from beginning or end of history */
	interp_forward = partial_history_cycles > total_history_cycles / 2;
	partial_history_cycles = interp_forward ?
		total_history_cycles - partial_history_cycles :
		partial_history_cycles;

	/*
	 * Scale the monotonic raw time delta by:
	 *	partial_history_cycles / total_history_cycles
	 */
	corr_raw = (u64)ktime_to_ns(
		ktime_sub(ts->sys_monoraw, history->raw));
	ret = scale64_check_overflow(partial_history_cycles,
				     total_history_cycles, &corr_raw);
	if (ret)
		return ret;

	/*
	 * If there is a discontinuity in the history, scale monotonic raw
	 *	correction by:
	 *	mult(real)/mult(raw) yielding the realtime correction
	 * Otherwise, calculate the realtime correction similar to monotonic
	 *	raw calculation
	 */
	if (discontinuity) {
		corr_real = mul_u64_u32_div
			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
	} else {
		corr_real = (u64)ktime_to_ns(
			ktime_sub(ts->sys_realtime, history->real));
		ret = scale64_check_overflow(partial_history_cycles,
					     total_history_cycles, &corr_real);
		if (ret)
			return ret;
	}

	/* Fixup monotonic raw and real time values */
	if (interp_forward) {
		ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw);
		ts->sys_realtime = ktime_add_ns(history->real, corr_real);
	} else {
		ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw);
		ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real);
	}

	return 0;
}
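
/*
 * cycle_between - true if test occurs chronologically between before and after
 */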
static bool cycle_between(u64 before, u64 test, u64 after)
{
	if (test > before && test < after)
		return true;
	if (test < before && before > after)
		return true;
	return false;
}
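
/**
 * get_device_system_crosststamp - Synchronously capture system/device timestamp
 * @get_time_fn:	Callback to get simultaneous device time and
 *	system counter from the device driver
 * @ctx:		Context passed to get_time_fn()
 * @history_begin:	Historical reference point used to interpolate system
 *	time when counter provided by the driver is before the current interval
 * @xtstamp:		Receives simultaneously captured system and device time
 *
 * Reads a timestamp from a device and correlates it to system time
 */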
int get_device_system_crosststamp(int (*get_time_fn)
				  (ktime_t *device_time,
				   struct system_counterval_t *sys_counterval,
				   void *ctx),
				  void *ctx,
				  struct system_time_snapshot *history_begin,
				  struct system_device_crosststamp *xtstamp)
{
	struct system_counterval_t system_counterval;
	struct timekeeper *tk = &tk_core.timekeeper;
	u64 cycles, now, interval_start;
	unsigned int clock_was_set_seq = 0;
	ktime_t base_real, base_raw;
	u64 nsec_real, nsec_raw;
	u8 cs_was_changed_seq;
	unsigned long seq;
	bool do_interp;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		/*
		 * Try to synchronously capture device time and a system
		 * counter value calling back into the device driver
		 */
		ret = get_time_fn(&xtstamp->device, &system_counterval, ctx);
		if (ret)
			return ret;

		/*
		 * Verify that the clocksource associated with the captured
		 * system counter value is the same as the currently installed
		 * timekeeper clocksource
		 */
		if (tk->tkr_mono.clock != system_counterval.cs)
			return -ENODEV;
		cycles = system_counterval.cycles;

		/*
		 * Check whether the system counter value provided by the
		 * device driver is on the current timekeeping interval.
		 */
		now = tk_clock_read(&tk->tkr_mono);
		interval_start = tk->tkr_mono.cycle_last;
		if (!cycle_between(interval_start, cycles, now)) {
			clock_was_set_seq = tk->clock_was_set_seq;
			cs_was_changed_seq = tk->cs_was_changed_seq;
			cycles = interval_start;
			do_interp = true;
		} else {
			do_interp = false;
		}

		base_real = ktime_add(tk->tkr_mono.base,
				      tk_core.timekeeper.offs_real);
		base_raw = tk->tkr_raw.base;

		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
						     system_counterval.cycles);
		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
						    system_counterval.cycles);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
	xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw);

	/*
	 * Interpolate if necessary, adjusting back from the start of the
	 * current interval
	 */
	if (do_interp) {
		u64 partial_history_cycles, total_history_cycles;
		bool discontinuity;

		/*
		 * Check that the counter value occurs after the provided
		 * history reference and that the history doesn't cross a
		 * clocksource change
		 */
		if (!history_begin ||
		    !cycle_between(history_begin->cycles,
				   system_counterval.cycles, cycles) ||
		    history_begin->cs_was_changed_seq != cs_was_changed_seq)
			return -EINVAL;
		partial_history_cycles = cycles - system_counterval.cycles;
		total_history_cycles = cycles - history_begin->cycles;
		discontinuity =
			history_begin->clock_was_set_seq != clock_was_set_seq;

		ret = adjust_historical_crosststamp(history_begin,
						    partial_history_cycles,
						    total_history_cycles,
						    discontinuity, xtstamp);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:     pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;
	int ret = 0;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
		ret = -EINVAL;
		goto out;
	}

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);
out:
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(do_settimeofday64);

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
static int timekeeping_inject_offset(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 tmp;
	int ret = 0;

	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), *ts);
	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
	    !timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * Keeping the CMOS clock in universal time (UTC), as real UNIX machines
 * always do, avoids all headaches about daylight saving times and
 * warping kernel clocks.
 */
void timekeeping_warp_clock(void)
{
	if (sys_tz.tz_minuteswest != 0) {
		struct timespec64 adjust;

		persistent_clock_is_local = 1;
		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
		adjust.tv_nsec = 0;
		timekeeping_inject_offset(&adjust);
	}
}

/**
 * __timekeeping_set_tai_offset - Sets the TAI offset from UTC and monotonic
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/*
	 * If the clocksource lives in a module, take a module reference
	 * and enable it before switching. Keep the old clocksource if
	 * either step fails.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:		pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * ktime_get_raw_ts64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void ktime_get_raw_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->raw_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(ktime_get_raw_ts64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exists = true;

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_sec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by RTC subsystem.
 * If system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume() means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * 1) can be determined whether to use or not only when doing
 * timekeeping_resume() which is invoked after rtc_suspend(),
 * so we can't skip rtc_suspend() surely if system has 1).
 *
 * But if system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	u64 cycle_now;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After system resumes, we need to calculate the suspended time and
	 * compensate it for the OS time. There are 3 sources that suggest
	 * respective solutions:
	 *  1) persistent clock (ie: RTC accessible when irqs are off)
	 *  2) nonstop clocksource
	 *  3) the RTC (ie: IRQ driven), handled separately in rtc_resume()
	 * The less preferred source will only be tried if there is no better
	 * usable source.
	 */
	cycle_now = tk_clock_read(&tk->tkr_mono);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 nsec, cyc_delta;

		cyc_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
					      tk->tkr_mono.mask);
		nsec = mul_u64_u32_shr(cyc_delta, clock->mult, clock->shift);
		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64		delta, delta_delta;
	static struct timespec64	old_delta;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);
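
/*
 * Apply a multiplier adjustment to the timekeeper
 */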
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 s32 mult_adj)
{
	s64 interval = tk->cycle_interval;

	if (mult_adj == 0) {
		return;
	} else if (mult_adj == -1) {
		interval = -interval;
		offset = -offset;
	} else if (mult_adj != 1) {
		interval *= mult_adj;
		offset *= mult_adj;
	}

	/*
	 * To keep things simple, assume mult_adj == 1 for the reasoning
	 * below; for other values, interval and offset have been scaled
	 * above so the same argument holds.
	 *
	 * Bumping the multiplier by one grows xtime_interval by one
	 * cycle_interval, since:
	 *	xtime_interval = cycle_interval * mult
	 * so:
	 *	cycle_interval * (mult + 1) = xtime_interval + cycle_interval
	 *
	 * offset holds the non-accumulated cycles, so the current time in
	 * shifted nanoseconds is:
	 *	now = (offset * mult) + xtime_nsec
	 * A frequency adjustment must not move 'now': for the same offset
	 * the readout must agree before and after the change:
	 *	(offset * mult_1) + xtime_nsec_1 =
	 *		(offset * (mult_1 + 1)) + xtime_nsec_2
	 * which cancels to:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * i.e.:
	 *	xtime_nsec -= offset
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
}
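
/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */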
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	u32 mult;

	/*
	 * Determine the multiplier from the current NTP tick length.
	 * Avoid expensive division when the tick length doesn't change.
	 */
	if (likely(tk->ntp_tick == ntp_tick_length())) {
		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
	} else {
		tk->ntp_tick = ntp_tick_length();
		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
				 tk->xtime_remainder, tk->cycle_interval);
	}

	/*
	 * If the clock is behind the NTP time, increase the multiplier by 1
	 * to catch up with it. If it's ahead and there was a remainder in the
	 * tick division, the clock will slow down. Otherwise it will stay
	 * ahead until the tick length changes to a non-divisible value.
	 */
	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
	mult += tk->ntp_err_mult;

	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the
	 * clocksource in the code above, it's possible the required
	 * corrective factor to xtime_nsec could cause it to underflow.
	 *
	 * Now, since we have already accumulated the second and the NTP
	 * subsystem has been notified via second_overflow(), we need to skip
	 * the next update.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
							tk->tkr_mono.shift;
		tk->xtime_sec--;
		tk->skip_second_overflow = 1;
	}
}
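
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 */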
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/*
		 * Skip NTP update if this second was accumulated before,
		 * i.e. xtime_nsec underflowed in timekeeping_adjust()
		 */
		if (unlikely(tk->skip_second_overflow)) {
			tk->skip_second_overflow = 0;
			continue;
		}

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}
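
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */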
static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
				    u32 shift, unsigned int *clock_set)
{
	u64 interval = tk->cycle_interval << shift;
	u64 snsec_per_sec;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
		tk->raw_sec++;
	}

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
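
/*
 * update_wall_time - Uses the current clocksource to increment the wall time
 */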
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	u64 offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);

	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sites. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));

	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context */
		clock_was_set_delayed();
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));
}
EXPORT_SYMBOL(ktime_get_coarse_real_ts64);

void ktime_get_coarse_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(ts, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);
}
EXPORT_SYMBOL(ktime_get_coarse_ts64);

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base >= tk->next_leap_ktime))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex
 */
static int timekeeping_validate_timex(struct timex *txc)
{
	if (txc->modes & ADJ_ADJTIME) {
		/* singleshot must not be used with any other mode bits */
		if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
			return -EINVAL;
		if (!(txc->modes & ADJ_OFFSET_READONLY) &&
		    !capable(CAP_SYS_TIME))
			return -EPERM;
	} else {
		/* In order to modify anything, you gotta be super-user! */
		if (txc->modes && !capable(CAP_SYS_TIME))
			return -EPERM;
		/*
		 * if the quartz is off by more than 10% then
		 * something is VERY wrong!
		 */
		if (txc->modes & ADJ_TICK &&
		    (txc->tick <  900000/USER_HZ ||
		     txc->tick > 1100000/USER_HZ))
			return -EINVAL;
	}

	if (txc->modes & ADJ_SETOFFSET) {
		/* In order to inject time, you gotta be super-user! */
		if (!capable(CAP_SYS_TIME))
			return -EPERM;

		/*
		 * Validate if a timespec/timeval used to inject a time
		 * offset is valid.  Offsets can be positive or negative, so
		 * we don't check tv_sec. The value of the timeval/timespec
		 * is the sum of its fields, but *NOTE*:
		 * The field tv_usec/tv_nsec must always be non-negative and
		 * we can't have more nanoseconds/microseconds than a second.
		 */
		if (txc->time.tv_usec < 0)
			return -EINVAL;

		if (txc->modes & ADJ_NANO) {
			if (txc->time.tv_usec >= NSEC_PER_SEC)
				return -EINVAL;
		} else {
			if (txc->time.tv_usec >= USEC_PER_SEC)
				return -EINVAL;
		}
	}

	/*
	 * Check for potential multiplication overflows that can
	 * only happen on 64-bit systems:
	 */
	if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
		if (LLONG_MIN / PPM_SCALE > txc->freq)
			return -EINVAL;
		if (LLONG_MAX / PPM_SCALE < txc->freq)
			return -EINVAL;
	}

	return 0;
}

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = timekeeping_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec64 delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif

/*
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}