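/*
 * Kernel timekeeping code and accessor functions.
 *
 * Maintains the wall clock (CLOCK_REALTIME), monotonic and raw
 * monotonic clocks on top of the currently selected clocksource and
 * provides the accessors used by the rest of the kernel.
 */
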
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)
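
/*
 * The most important data for readout fits into a single cache line,
 * hence the ____cacheline_aligned annotation below.
 */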
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
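
/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See update_fast_timekeeper() below.
 */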
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* Flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offs_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{

	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred(" Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred(" Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So instead, grab a consistent snapshot of the
	 * data before calculating anything.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values (i.e. the delta wrapped close
	 * to the mask).
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles values to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif
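
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */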
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}

/* Timekeeper helper functions. */
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t delta;
	s64 nsec;

	delta = timekeeping_get_delta(tkr);

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in arch_gettimeoffset() */
	return nsec + arch_gettimeoffset();
}
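
/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to the fast timekeeper to update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * Employ the latch technique (see raw_write_seqcount_latch()): if an NMI
 * hits while base[0] is being updated, readers are forced over to base[1],
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds); see
 * ktime_get_mono_fast_ns().
 */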
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}
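
/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * The timestamp is calculated as base + clock_delta * slope from the
 * latched tk_read_base copy selected by the lowest bit of the latch
 * sequence count. It is therefore not guaranteed to be monotonic
 * across an update: if an update lowers the slope, a reader still on
 * the not yet updated copy can observe a timestamp slightly ahead of
 * timestamps taken right after the update. Callers need to be able to
 * deal with that.
 */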
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/* Suspend-time cycles value for halted fast timekeeper */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}
412
413
414
415
416
417
418
419
420
421
422
423static void halt_fast_timekeeper(struct timekeeper *tk)
424{
425 static struct tk_read_base tkr_dummy;
426 struct tk_read_base *tkr = &tk->tkr_mono;
427
428 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
429 cycles_at_suspend = tkr->read(tkr->clock);
430 tkr_dummy.read = dummy_clock_read;
431 update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
432
433 tkr = &tk->tkr_raw;
434 memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
435 tkr_dummy.read = dummy_clock_read;
436 update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
437}
438
439#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
440
441static inline void update_vsyscall(struct timekeeper *tk)
442{
443 struct timespec xt, wm;
444
445 xt = timespec64_to_timespec(tk_xtime(tk));
446 wm = timespec64_to_timespec(tk->wall_to_monotonic);
447 update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
448 tk->tkr_mono.cycle_last);
449}
450
451static inline void old_vsyscall_fixup(struct timekeeper *tk)
452{
453 s64 remainder;
454
455
456
457
458
459
460
461
462
463
464
465 remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
466 tk->tkr_mono.xtime_nsec -= remainder;
467 tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
468 tk->ntp_error += remainder << tk->ntp_error_shift;
469 tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
470}
471#else
472#define old_vsyscall_fixup(tk)
473#endif
474
475static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
476
477static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
478{
479 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
480}
481
482
483
484
485int pvclock_gtod_register_notifier(struct notifier_block *nb)
486{
487 struct timekeeper *tk = &tk_core.timekeeper;
488 unsigned long flags;
489 int ret;
490
491 raw_spin_lock_irqsave(&timekeeper_lock, flags);
492 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
493 update_pvclock_gtod(tk, true);
494 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
495
496 return ret;
497}
498EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
499
500
501
502
503
504int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
505{
506 unsigned long flags;
507 int ret;
508
509 raw_spin_lock_irqsave(&timekeeper_lock, flags);
510 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
511 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
512
513 return ret;
514}
515EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
516
517
518
519
520static inline void tk_update_leap_state(struct timekeeper *tk)
521{
522 tk->next_leap_ktime = ntp_get_next_leap();
523 if (tk->next_leap_ktime.tv64 != KTIME_MAX)
524
525 tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
526}
527
528
529
530
531static inline void tk_update_ktime_data(struct timekeeper *tk)
532{
533 u64 seconds;
534 u32 nsec;
535
536
537
538
539
540
541
542
543 seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
544 nsec = (u32) tk->wall_to_monotonic.tv_nsec;
545 tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
546
547
548 tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
549
550
551
552
553
554
555 nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
556 if (nsec >= NSEC_PER_SEC)
557 seconds++;
558 tk->ktime_sec = seconds;
559}
560
561
562static void timekeeping_update(struct timekeeper *tk, unsigned int action)
563{
564 if (action & TK_CLEAR_NTP) {
565 tk->ntp_error = 0;
566 ntp_clear();
567 }
568
569 tk_update_leap_state(tk);
570 tk_update_ktime_data(tk);
571
572 update_vsyscall(tk);
573 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
574
575 update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
576 update_fast_timekeeper(&tk->tkr_raw, &tk_fast_raw);
577
578 if (action & TK_CLOCK_WAS_SET)
579 tk->clock_was_set_seq++;
580
581
582
583
584
585 if (action & TK_MIRROR)
586 memcpy(&shadow_timekeeper, &tk_core.timekeeper,
587 sizeof(tk_core.timekeeper));
588}
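
/**
 * timekeeping_forward_now - update clock to the current time
 * @tk:		Pointer to the timekeeper to update
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */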
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in arch_gettimeoffset() (shifted) */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}
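
/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Updates the time of day in the timespec64.
 * Returns 0 on success, or -EAGAIN when timekeeping is suspended.
 */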
626int __getnstimeofday64(struct timespec64 *ts)
627{
628 struct timekeeper *tk = &tk_core.timekeeper;
629 unsigned long seq;
630 s64 nsecs = 0;
631
632 do {
633 seq = read_seqcount_begin(&tk_core.seq);
634
635 ts->tv_sec = tk->xtime_sec;
636 nsecs = timekeeping_get_ns(&tk->tkr_mono);
637
638 } while (read_seqcount_retry(&tk_core.seq, seq));
639
640 ts->tv_nsec = 0;
641 timespec64_add_ns(ts, nsecs);
642
643
644
645
646
647 if (unlikely(timekeeping_suspended))
648 return -EAGAIN;
649 return 0;
650}
651EXPORT_SYMBOL(__getnstimeofday64);
652
653
654
655
656
657
658
659void getnstimeofday64(struct timespec64 *ts)
660{
661 WARN_ON(__getnstimeofday64(ts));
662}
663EXPORT_SYMBOL(getnstimeofday64);
664
665ktime_t ktime_get(void)
666{
667 struct timekeeper *tk = &tk_core.timekeeper;
668 unsigned int seq;
669 ktime_t base;
670 s64 nsecs;
671
672 WARN_ON(timekeeping_suspended);
673
674 do {
675 seq = read_seqcount_begin(&tk_core.seq);
676 base = tk->tkr_mono.base;
677 nsecs = timekeeping_get_ns(&tk->tkr_mono);
678
679 } while (read_seqcount_retry(&tk_core.seq, seq));
680
681 return ktime_add_ns(base, nsecs);
682}
683EXPORT_SYMBOL_GPL(ktime_get);
684
685u32 ktime_get_resolution_ns(void)
686{
687 struct timekeeper *tk = &tk_core.timekeeper;
688 unsigned int seq;
689 u32 nsecs;
690
691 WARN_ON(timekeeping_suspended);
692
693 do {
694 seq = read_seqcount_begin(&tk_core.seq);
695 nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
696 } while (read_seqcount_retry(&tk_core.seq, seq));
697
698 return nsecs;
699}
700EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
701
702static ktime_t *offsets[TK_OFFS_MAX] = {
703 [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
704 [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
705 [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
706};
707
708ktime_t ktime_get_with_offset(enum tk_offsets offs)
709{
710 struct timekeeper *tk = &tk_core.timekeeper;
711 unsigned int seq;
712 ktime_t base, *offset = offsets[offs];
713 s64 nsecs;
714
715 WARN_ON(timekeeping_suspended);
716
717 do {
718 seq = read_seqcount_begin(&tk_core.seq);
719 base = ktime_add(tk->tkr_mono.base, *offset);
720 nsecs = timekeeping_get_ns(&tk->tkr_mono);
721
722 } while (read_seqcount_retry(&tk_core.seq, seq));
723
724 return ktime_add_ns(base, nsecs);
725
726}
727EXPORT_SYMBOL_GPL(ktime_get_with_offset);
728
729
730
731
732
733
734ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
735{
736 ktime_t *offset = offsets[offs];
737 unsigned long seq;
738 ktime_t tconv;
739
740 do {
741 seq = read_seqcount_begin(&tk_core.seq);
742 tconv = ktime_add(tmono, *offset);
743 } while (read_seqcount_retry(&tk_core.seq, seq));
744
745 return tconv;
746}
747EXPORT_SYMBOL_GPL(ktime_mono_to_any);
748
749
750
751
752ktime_t ktime_get_raw(void)
753{
754 struct timekeeper *tk = &tk_core.timekeeper;
755 unsigned int seq;
756 ktime_t base;
757 s64 nsecs;
758
759 do {
760 seq = read_seqcount_begin(&tk_core.seq);
761 base = tk->tkr_raw.base;
762 nsecs = timekeeping_get_ns(&tk->tkr_raw);
763
764 } while (read_seqcount_retry(&tk_core.seq, seq));
765
766 return ktime_add_ns(base, nsecs);
767}
768EXPORT_SYMBOL_GPL(ktime_get_raw);
769
770
771
772
773
774
775
776
777
778void ktime_get_ts64(struct timespec64 *ts)
779{
780 struct timekeeper *tk = &tk_core.timekeeper;
781 struct timespec64 tomono;
782 s64 nsec;
783 unsigned int seq;
784
785 WARN_ON(timekeeping_suspended);
786
787 do {
788 seq = read_seqcount_begin(&tk_core.seq);
789 ts->tv_sec = tk->xtime_sec;
790 nsec = timekeeping_get_ns(&tk->tkr_mono);
791 tomono = tk->wall_to_monotonic;
792
793 } while (read_seqcount_retry(&tk_core.seq, seq));
794
795 ts->tv_sec += tomono.tv_sec;
796 ts->tv_nsec = 0;
797 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
798}
799EXPORT_SYMBOL_GPL(ktime_get_ts64);
800
801
802
803
804
805
806
807
808
809
810time64_t ktime_get_seconds(void)
811{
812 struct timekeeper *tk = &tk_core.timekeeper;
813
814 WARN_ON(timekeeping_suspended);
815 return tk->ktime_sec;
816}
817EXPORT_SYMBOL_GPL(ktime_get_seconds);
818
819
820
821
822
823
824
825
826
827
828
829
830time64_t ktime_get_real_seconds(void)
831{
832 struct timekeeper *tk = &tk_core.timekeeper;
833 time64_t seconds;
834 unsigned int seq;
835
836 if (IS_ENABLED(CONFIG_64BIT))
837 return tk->xtime_sec;
838
839 do {
840 seq = read_seqcount_begin(&tk_core.seq);
841 seconds = tk->xtime_sec;
842
843 } while (read_seqcount_retry(&tk_core.seq, seq));
844
845 return seconds;
846}
847EXPORT_SYMBOL_GPL(ktime_get_real_seconds);
848
849#ifdef CONFIG_NTP_PPS
850
851
852
853
854
855
856
857
858
859
860void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
861{
862 struct timekeeper *tk = &tk_core.timekeeper;
863 unsigned long seq;
864 s64 nsecs_raw, nsecs_real;
865
866 WARN_ON_ONCE(timekeeping_suspended);
867
868 do {
869 seq = read_seqcount_begin(&tk_core.seq);
870
871 *ts_raw = timespec64_to_timespec(tk->raw_time);
872 ts_real->tv_sec = tk->xtime_sec;
873 ts_real->tv_nsec = 0;
874
875 nsecs_raw = timekeeping_get_ns(&tk->tkr_raw);
876 nsecs_real = timekeeping_get_ns(&tk->tkr_mono);
877
878 } while (read_seqcount_retry(&tk_core.seq, seq));
879
880 timespec_add_ns(ts_raw, nsecs_raw);
881 timespec_add_ns(ts_real, nsecs_real);
882}
883EXPORT_SYMBOL(getnstime_raw_and_real);
884
885#endif
886
887
888
889
890
891
892
893void do_gettimeofday(struct timeval *tv)
894{
895 struct timespec64 now;
896
897 getnstimeofday64(&now);
898 tv->tv_sec = now.tv_sec;
899 tv->tv_usec = now.tv_nsec/1000;
900}
901EXPORT_SYMBOL(do_gettimeofday);
902
903
904
905
906
907
908
909int do_settimeofday64(const struct timespec64 *ts)
910{
911 struct timekeeper *tk = &tk_core.timekeeper;
912 struct timespec64 ts_delta, xt;
913 unsigned long flags;
914 int ret = 0;
915
916 if (!timespec64_valid_strict(ts))
917 return -EINVAL;
918
919 raw_spin_lock_irqsave(&timekeeper_lock, flags);
920 write_seqcount_begin(&tk_core.seq);
921
922 timekeeping_forward_now(tk);
923
924 xt = tk_xtime(tk);
925 ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
926 ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
927
928 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
929 ret = -EINVAL;
930 goto out;
931 }
932
933 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
934
935 tk_set_xtime(tk, ts);
936out:
937 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
938
939 write_seqcount_end(&tk_core.seq);
940 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
941
942
943 clock_was_set();
944
945 return ret;
946}
947EXPORT_SYMBOL(do_settimeofday64);
948
949
950
951
952
953
954
955int timekeeping_inject_offset(struct timespec *ts)
956{
957 struct timekeeper *tk = &tk_core.timekeeper;
958 unsigned long flags;
959 struct timespec64 ts64, tmp;
960 int ret = 0;
961
962 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
963 return -EINVAL;
964
965 ts64 = timespec_to_timespec64(*ts);
966
967 raw_spin_lock_irqsave(&timekeeper_lock, flags);
968 write_seqcount_begin(&tk_core.seq);
969
970 timekeeping_forward_now(tk);
971
972
973 tmp = timespec64_add(tk_xtime(tk), ts64);
974 if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
975 !timespec64_valid_strict(&tmp)) {
976 ret = -EINVAL;
977 goto error;
978 }
979
980 tk_xtime_add(tk, &ts64);
981 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
982
983error:
984 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
985
986 write_seqcount_end(&tk_core.seq);
987 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
988
989
990 clock_was_set();
991
992 return ret;
993}
994EXPORT_SYMBOL(timekeeping_inject_offset);
995
996
997
998
999
1000
1001s32 timekeeping_get_tai_offset(void)
1002{
1003 struct timekeeper *tk = &tk_core.timekeeper;
1004 unsigned int seq;
1005 s32 ret;
1006
1007 do {
1008 seq = read_seqcount_begin(&tk_core.seq);
1009 ret = tk->tai_offset;
1010 } while (read_seqcount_retry(&tk_core.seq, seq));
1011
1012 return ret;
1013}
1014
1015
1016
1017
1018
1019static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
1020{
1021 tk->tai_offset = tai_offset;
1022 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
1023}
1024
1025
1026
1027
1028
1029void timekeeping_set_tai_offset(s32 tai_offset)
1030{
1031 struct timekeeper *tk = &tk_core.timekeeper;
1032 unsigned long flags;
1033
1034 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1035 write_seqcount_begin(&tk_core.seq);
1036 __timekeeping_set_tai_offset(tk, tai_offset);
1037 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1038 write_seqcount_end(&tk_core.seq);
1039 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1040 clock_was_set();
1041}
1042
1043
1044
1045
1046
1047
1048static int change_clocksource(void *data)
1049{
1050 struct timekeeper *tk = &tk_core.timekeeper;
1051 struct clocksource *new, *old;
1052 unsigned long flags;
1053
1054 new = (struct clocksource *) data;
1055
1056 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1057 write_seqcount_begin(&tk_core.seq);
1058
1059 timekeeping_forward_now(tk);
1060
1061
1062
1063
1064 if (try_module_get(new->owner)) {
1065 if (!new->enable || new->enable(new) == 0) {
1066 old = tk->tkr_mono.clock;
1067 tk_setup_internals(tk, new);
1068 if (old->disable)
1069 old->disable(old);
1070 module_put(old->owner);
1071 } else {
1072 module_put(new->owner);
1073 }
1074 }
1075 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1076
1077 write_seqcount_end(&tk_core.seq);
1078 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1079
1080 return 0;
1081}
1082
1083
1084
1085
1086
1087
1088
1089
1090int timekeeping_notify(struct clocksource *clock)
1091{
1092 struct timekeeper *tk = &tk_core.timekeeper;
1093
1094 if (tk->tkr_mono.clock == clock)
1095 return 0;
1096 stop_machine(change_clocksource, clock, NULL);
1097 tick_clock_notify();
1098 return tk->tkr_mono.clock == clock ? 0 : -1;
1099}
1100
1101
1102
1103
1104
1105
1106
1107void getrawmonotonic64(struct timespec64 *ts)
1108{
1109 struct timekeeper *tk = &tk_core.timekeeper;
1110 struct timespec64 ts64;
1111 unsigned long seq;
1112 s64 nsecs;
1113
1114 do {
1115 seq = read_seqcount_begin(&tk_core.seq);
1116 nsecs = timekeeping_get_ns(&tk->tkr_raw);
1117 ts64 = tk->raw_time;
1118
1119 } while (read_seqcount_retry(&tk_core.seq, seq));
1120
1121 timespec64_add_ns(&ts64, nsecs);
1122 *ts = ts64;
1123}
1124EXPORT_SYMBOL(getrawmonotonic64);
1125
1126
1127
1128
1129
1130int timekeeping_valid_for_hres(void)
1131{
1132 struct timekeeper *tk = &tk_core.timekeeper;
1133 unsigned long seq;
1134 int ret;
1135
1136 do {
1137 seq = read_seqcount_begin(&tk_core.seq);
1138
1139 ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
1140
1141 } while (read_seqcount_retry(&tk_core.seq, seq));
1142
1143 return ret;
1144}
1145
1146
1147
1148
1149u64 timekeeping_max_deferment(void)
1150{
1151 struct timekeeper *tk = &tk_core.timekeeper;
1152 unsigned long seq;
1153 u64 ret;
1154
1155 do {
1156 seq = read_seqcount_begin(&tk_core.seq);
1157
1158 ret = tk->tkr_mono.clock->max_idle_ns;
1159
1160 } while (read_seqcount_retry(&tk_core.seq, seq));
1161
1162 return ret;
1163}
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174void __weak read_persistent_clock(struct timespec *ts)
1175{
1176 ts->tv_sec = 0;
1177 ts->tv_nsec = 0;
1178}
1179
1180void __weak read_persistent_clock64(struct timespec64 *ts64)
1181{
1182 struct timespec ts;
1183
1184 read_persistent_clock(&ts);
1185 *ts64 = timespec_to_timespec64(ts);
1186}
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197void __weak read_boot_clock64(struct timespec64 *ts)
1198{
1199 ts->tv_sec = 0;
1200 ts->tv_nsec = 0;
1201}
1202
1203
1204static bool sleeptime_injected;
1205
1206
1207static bool persistent_clock_exists;
1208
1209
1210
1211
1212void __init timekeeping_init(void)
1213{
1214 struct timekeeper *tk = &tk_core.timekeeper;
1215 struct clocksource *clock;
1216 unsigned long flags;
1217 struct timespec64 now, boot, tmp;
1218
1219 read_persistent_clock64(&now);
1220 if (!timespec64_valid_strict(&now)) {
1221 pr_warn("WARNING: Persistent clock returned invalid value!\n"
1222 " Check your CMOS/BIOS settings.\n");
1223 now.tv_sec = 0;
1224 now.tv_nsec = 0;
1225 } else if (now.tv_sec || now.tv_nsec)
1226 persistent_clock_exists = true;
1227
1228 read_boot_clock64(&boot);
1229 if (!timespec64_valid_strict(&boot)) {
1230 pr_warn("WARNING: Boot clock returned invalid value!\n"
1231 " Check your CMOS/BIOS settings.\n");
1232 boot.tv_sec = 0;
1233 boot.tv_nsec = 0;
1234 }
1235
1236 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1237 write_seqcount_begin(&tk_core.seq);
1238 ntp_init();
1239
1240 clock = clocksource_default_clock();
1241 if (clock->enable)
1242 clock->enable(clock);
1243 tk_setup_internals(tk, clock);
1244
1245 tk_set_xtime(tk, &now);
1246 tk->raw_time.tv_sec = 0;
1247 tk->raw_time.tv_nsec = 0;
1248 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
1249 boot = tk_xtime(tk);
1250
1251 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1252 tk_set_wall_to_mono(tk, tmp);
1253
1254 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1255
1256 write_seqcount_end(&tk_core.seq);
1257 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1258}
1259
1260
1261static struct timespec64 timekeeping_suspend_time;
1262
1263
1264
1265
1266
1267
1268
1269
1270static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
1271 struct timespec64 *delta)
1272{
1273 if (!timespec64_valid_strict(delta)) {
1274 printk_deferred(KERN_WARNING
1275 "__timekeeping_inject_sleeptime: Invalid "
1276 "sleep delta value!\n");
1277 return;
1278 }
1279 tk_xtime_add(tk, delta);
1280 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
1281 tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
1282 tk_debug_account_sleep_time(delta);
1283}
1284
1285#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302bool timekeeping_rtc_skipresume(void)
1303{
1304 return sleeptime_injected;
1305}
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316bool timekeeping_rtc_skipsuspend(void)
1317{
1318 return persistent_clock_exists;
1319}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332void timekeeping_inject_sleeptime64(struct timespec64 *delta)
1333{
1334 struct timekeeper *tk = &tk_core.timekeeper;
1335 unsigned long flags;
1336
1337 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1338 write_seqcount_begin(&tk_core.seq);
1339
1340 timekeeping_forward_now(tk);
1341
1342 __timekeeping_inject_sleeptime(tk, delta);
1343
1344 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
1345
1346 write_seqcount_end(&tk_core.seq);
1347 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1348
1349
1350 clock_was_set();
1351}
1352#endif
1353
1354
1355
1356
1357void timekeeping_resume(void)
1358{
1359 struct timekeeper *tk = &tk_core.timekeeper;
1360 struct clocksource *clock = tk->tkr_mono.clock;
1361 unsigned long flags;
1362 struct timespec64 ts_new, ts_delta;
1363 cycle_t cycle_now, cycle_delta;
1364
1365 sleeptime_injected = false;
1366 read_persistent_clock64(&ts_new);
1367
1368 clockevents_resume();
1369 clocksource_resume();
1370
1371 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1372 write_seqcount_begin(&tk_core.seq);
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386 cycle_now = tk->tkr_mono.read(clock);
1387 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
1388 cycle_now > tk->tkr_mono.cycle_last) {
1389 u64 num, max = ULLONG_MAX;
1390 u32 mult = clock->mult;
1391 u32 shift = clock->shift;
1392 s64 nsec = 0;
1393
1394 cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
1395 tk->tkr_mono.mask);
1396
1397
1398
1399
1400
1401
1402 do_div(max, mult);
1403 if (cycle_delta > max) {
1404 num = div64_u64(cycle_delta, max);
1405 nsec = (((u64) max * mult) >> shift) * num;
1406 cycle_delta -= num * max;
1407 }
1408 nsec += ((u64) cycle_delta * mult) >> shift;
1409
1410 ts_delta = ns_to_timespec64(nsec);
1411 sleeptime_injected = true;
1412 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1413 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1414 sleeptime_injected = true;
1415 }
1416
1417 if (sleeptime_injected)
1418 __timekeeping_inject_sleeptime(tk, &ts_delta);
1419
1420
1421 tk->tkr_mono.cycle_last = cycle_now;
1422 tk->tkr_raw.cycle_last = cycle_now;
1423
1424 tk->ntp_error = 0;
1425 timekeeping_suspended = 0;
1426 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1427 write_seqcount_end(&tk_core.seq);
1428 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1429
1430 touch_softlockup_watchdog();
1431
1432 tick_resume();
1433 hrtimers_resume();
1434}
1435
1436int timekeeping_suspend(void)
1437{
1438 struct timekeeper *tk = &tk_core.timekeeper;
1439 unsigned long flags;
1440 struct timespec64 delta, delta_delta;
1441 static struct timespec64 old_delta;
1442
1443 read_persistent_clock64(&timekeeping_suspend_time);
1444
1445
1446
1447
1448
1449
1450 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1451 persistent_clock_exists = true;
1452
1453 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1454 write_seqcount_begin(&tk_core.seq);
1455 timekeeping_forward_now(tk);
1456 timekeeping_suspended = 1;
1457
1458 if (persistent_clock_exists) {
1459
1460
1461
1462
1463
1464
1465 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1466 delta_delta = timespec64_sub(delta, old_delta);
1467 if (abs(delta_delta.tv_sec) >= 2) {
1468
1469
1470
1471
1472 old_delta = delta;
1473 } else {
1474
1475 timekeeping_suspend_time =
1476 timespec64_add(timekeeping_suspend_time, delta_delta);
1477 }
1478 }
1479
1480 timekeeping_update(tk, TK_MIRROR);
1481 halt_fast_timekeeper(tk);
1482 write_seqcount_end(&tk_core.seq);
1483 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1484
1485 tick_suspend();
1486 clocksource_suspend();
1487 clockevents_suspend();
1488
1489 return 0;
1490}
1491
1492
1493static struct syscore_ops timekeeping_syscore_ops = {
1494 .resume = timekeeping_resume,
1495 .suspend = timekeeping_suspend,
1496};
1497
1498static int __init timekeeping_init_ops(void)
1499{
1500 register_syscore_ops(&timekeeping_syscore_ops);
1501 return 0;
1502}
1503device_initcall(timekeeping_init_ops);
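
/*
 * Apply a multiplier adjustment to the timekeeper
 */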
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							  s64 offset,
							  bool negative,
							  int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;
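
	/*
	 * To keep the readout consistent across the multiplier change
	 * (assume mult_adj == 1; interval and offset have been scaled
	 * by adj_scale above so the math is the same):
	 *
	 *	xtime_interval = cycle_interval * mult
	 *
	 * so bumping mult by one adds one cycle_interval to
	 * xtime_interval. "offset" holds the not yet accumulated cycles
	 * and the current time in shifted nanoseconds is:
	 *
	 *	now = (offset * mult) + xtime_nsec
	 *
	 * The readout must not jump when mult changes, so xtime_nsec is
	 * reduced by offset * mult_adj to compensate (the scaling above
	 * already folded mult_adj into offset), and ntp_error is updated
	 * for the net change below.
	 */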
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}
1585
1586
1587
1588
1589
1590static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
1591 s64 offset)
1592{
1593 s64 interval = tk->cycle_interval;
1594 s64 xinterval = tk->xtime_interval;
1595 s64 tick_error;
1596 bool negative;
1597 u32 adj;
1598
1599
1600 if (tk->ntp_err_mult)
1601 xinterval -= tk->cycle_interval;
1602
1603 tk->ntp_tick = ntp_tick_length();
1604
1605
1606 tick_error = ntp_tick_length() >> tk->ntp_error_shift;
1607 tick_error -= (xinterval + tk->xtime_remainder);
1608
1609
1610 if (likely((tick_error >= 0) && (tick_error <= interval)))
1611 return;
1612
1613
1614 negative = (tick_error < 0);
1615
1616
1617 tick_error = abs64(tick_error);
1618 for (adj = 0; tick_error > interval; adj++)
1619 tick_error >>= 1;
1620
1621
1622 timekeeping_apply_adjustment(tk, offset, negative, adj);
1623}
1624
1625
1626
1627
1628
1629static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1630{
1631
1632 timekeeping_freqadjust(tk, offset);
1633
1634
1635 if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
1636 tk->ntp_err_mult = 1;
1637 timekeeping_apply_adjustment(tk, offset, 0, 0);
1638 } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
1639
1640 timekeeping_apply_adjustment(tk, offset, 1, 0);
1641 tk->ntp_err_mult = 0;
1642 }
1643
1644 if (unlikely(tk->tkr_mono.clock->maxadj &&
1645 (abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
1646 > tk->tkr_mono.clock->maxadj))) {
1647 printk_once(KERN_WARNING
1648 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1649 tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
1650 (long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
1651 }
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667 if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
1668 s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
1669 tk->tkr_mono.xtime_nsec = 0;
1670 tk->ntp_error += neg << tk->ntp_error_shift;
1671 }
1672}
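
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_sec field.
 * It also calls into the NTP code to handle leapsecond processing.
 */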
1682static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1683{
1684 u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
1685 unsigned int clock_set = 0;
1686
1687 while (tk->tkr_mono.xtime_nsec >= nsecps) {
1688 int leap;
1689
1690 tk->tkr_mono.xtime_nsec -= nsecps;
1691 tk->xtime_sec++;
1692
1693
1694 leap = second_overflow(tk->xtime_sec);
1695 if (unlikely(leap)) {
1696 struct timespec64 ts;
1697
1698 tk->xtime_sec += leap;
1699
1700 ts.tv_sec = leap;
1701 ts.tv_nsec = 0;
1702 tk_set_wall_to_mono(tk,
1703 timespec64_sub(tk->wall_to_monotonic, ts));
1704
1705 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1706
1707 clock_set = TK_CLOCK_WAS_SET;
1708 }
1709 }
1710 return clock_set;
1711}
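
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */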
1722static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1723 u32 shift,
1724 unsigned int *clock_set)
1725{
1726 cycle_t interval = tk->cycle_interval << shift;
1727 u64 raw_nsecs;
1728
1729
1730 if (offset < interval)
1731 return offset;
1732
1733
1734 offset -= interval;
1735 tk->tkr_mono.cycle_last += interval;
1736 tk->tkr_raw.cycle_last += interval;
1737
1738 tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
1739 *clock_set |= accumulate_nsecs_to_secs(tk);
1740
1741
1742 raw_nsecs = (u64)tk->raw_interval << shift;
1743 raw_nsecs += tk->raw_time.tv_nsec;
1744 if (raw_nsecs >= NSEC_PER_SEC) {
1745 u64 raw_secs = raw_nsecs;
1746 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1747 tk->raw_time.tv_sec += raw_secs;
1748 }
1749 tk->raw_time.tv_nsec = raw_nsecs;
1750
1751
1752 tk->ntp_error += tk->ntp_tick << shift;
1753 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1754 (tk->ntp_error_shift + shift);
1755
1756 return offset;
1757}
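
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */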
1763void update_wall_time(void)
1764{
1765 struct timekeeper *real_tk = &tk_core.timekeeper;
1766 struct timekeeper *tk = &shadow_timekeeper;
1767 cycle_t offset;
1768 int shift = 0, maxshift;
1769 unsigned int clock_set = 0;
1770 unsigned long flags;
1771
1772 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1773
1774
1775 if (unlikely(timekeeping_suspended))
1776 goto out;
1777
1778#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1779 offset = real_tk->cycle_interval;
1780#else
1781 offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
1782 tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
1783#endif
1784
1785
1786 if (offset < real_tk->cycle_interval)
1787 goto out;
1788
1789
1790 timekeeping_check_update(real_tk, offset);
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1801 shift = max(0, shift);
1802
1803 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1804 shift = min(shift, maxshift);
1805 while (offset >= tk->cycle_interval) {
1806 offset = logarithmic_accumulation(tk, offset, shift,
1807 &clock_set);
1808 if (offset < tk->cycle_interval<<shift)
1809 shift--;
1810 }
1811
1812
1813 timekeeping_adjust(tk, offset);
1814
1815
1816
1817
1818
1819 old_vsyscall_fixup(tk);
1820
1821
1822
1823
1824
1825 clock_set |= accumulate_nsecs_to_secs(tk);
1826
1827 write_seqcount_begin(&tk_core.seq);
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838 timekeeping_update(tk, clock_set);
1839 memcpy(real_tk, tk, sizeof(*tk));
1840
1841 write_seqcount_end(&tk_core.seq);
1842out:
1843 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1844 if (clock_set)
1845
1846 clock_was_set_delayed();
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860void getboottime64(struct timespec64 *ts)
1861{
1862 struct timekeeper *tk = &tk_core.timekeeper;
1863 ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
1864
1865 *ts = ktime_to_timespec64(t);
1866}
1867EXPORT_SYMBOL_GPL(getboottime64);
1868
1869unsigned long get_seconds(void)
1870{
1871 struct timekeeper *tk = &tk_core.timekeeper;
1872
1873 return tk->xtime_sec;
1874}
1875EXPORT_SYMBOL(get_seconds);
1876
1877struct timespec __current_kernel_time(void)
1878{
1879 struct timekeeper *tk = &tk_core.timekeeper;
1880
1881 return timespec64_to_timespec(tk_xtime(tk));
1882}
1883
1884struct timespec64 current_kernel_time64(void)
1885{
1886 struct timekeeper *tk = &tk_core.timekeeper;
1887 struct timespec64 now;
1888 unsigned long seq;
1889
1890 do {
1891 seq = read_seqcount_begin(&tk_core.seq);
1892
1893 now = tk_xtime(tk);
1894 } while (read_seqcount_retry(&tk_core.seq, seq));
1895
1896 return now;
1897}
1898EXPORT_SYMBOL(current_kernel_time64);
1899
1900struct timespec64 get_monotonic_coarse64(void)
1901{
1902 struct timekeeper *tk = &tk_core.timekeeper;
1903 struct timespec64 now, mono;
1904 unsigned long seq;
1905
1906 do {
1907 seq = read_seqcount_begin(&tk_core.seq);
1908
1909 now = tk_xtime(tk);
1910 mono = tk->wall_to_monotonic;
1911 } while (read_seqcount_retry(&tk_core.seq, seq));
1912
1913 set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
1914 now.tv_nsec + mono.tv_nsec);
1915
1916 return now;
1917}
1918
1919
1920
1921
1922void do_timer(unsigned long ticks)
1923{
1924 jiffies_64 += ticks;
1925 calc_global_load(ticks);
1926}
1927
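
/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence count in @cwsseq differs from the timekeeper's
 * clock_was_set_seq. Also handles a pending leap second by reporting
 * an offs_real one second smaller once the leap edge has been reached.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event().
 */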
1941ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
1942 ktime_t *offs_boot, ktime_t *offs_tai)
1943{
1944 struct timekeeper *tk = &tk_core.timekeeper;
1945 unsigned int seq;
1946 ktime_t base;
1947 u64 nsecs;
1948
1949 do {
1950 seq = read_seqcount_begin(&tk_core.seq);
1951
1952 base = tk->tkr_mono.base;
1953 nsecs = timekeeping_get_ns(&tk->tkr_mono);
1954 base = ktime_add_ns(base, nsecs);
1955
1956 if (*cwsseq != tk->clock_was_set_seq) {
1957 *cwsseq = tk->clock_was_set_seq;
1958 *offs_real = tk->offs_real;
1959 *offs_boot = tk->offs_boot;
1960 *offs_tai = tk->offs_tai;
1961 }
1962
1963
1964 if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
1965 *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
1966
1967 } while (read_seqcount_retry(&tk_core.seq, seq));
1968
1969 return base;
1970}
1971
1972
1973
1974
1975int do_adjtimex(struct timex *txc)
1976{
1977 struct timekeeper *tk = &tk_core.timekeeper;
1978 unsigned long flags;
1979 struct timespec64 ts;
1980 s32 orig_tai, tai;
1981 int ret;
1982
1983
1984 ret = ntp_validate_timex(txc);
1985 if (ret)
1986 return ret;
1987
1988 if (txc->modes & ADJ_SETOFFSET) {
1989 struct timespec delta;
1990 delta.tv_sec = txc->time.tv_sec;
1991 delta.tv_nsec = txc->time.tv_usec;
1992 if (!(txc->modes & ADJ_NANO))
1993 delta.tv_nsec *= 1000;
1994 ret = timekeeping_inject_offset(&delta);
1995 if (ret)
1996 return ret;
1997 }
1998
1999 getnstimeofday64(&ts);
2000
2001 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2002 write_seqcount_begin(&tk_core.seq);
2003
2004 orig_tai = tai = tk->tai_offset;
2005 ret = __do_adjtimex(txc, &ts, &tai);
2006
2007 if (tai != orig_tai) {
2008 __timekeeping_set_tai_offset(tk, tai);
2009 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
2010 }
2011 tk_update_leap_state(tk);
2012
2013 write_seqcount_end(&tk_core.seq);
2014 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2015
2016 if (tai != orig_tai)
2017 clock_was_set();
2018
2019 ntp_notify_cmos_timer();
2020
2021 return ret;
2022}
2023
2024#ifdef CONFIG_NTP_PPS
2025
2026
2027
2028void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
2029{
2030 unsigned long flags;
2031
2032 raw_spin_lock_irqsave(&timekeeper_lock, flags);
2033 write_seqcount_begin(&tk_core.seq);
2034
2035 __hardpps(phase_ts, raw_ts);
2036
2037 write_seqcount_end(&tk_core.seq);
2038 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
2039}
2040EXPORT_SYMBOL(hardpps);
2041#endif
2042
2043
2044
2045
2046
2047
2048
2049void xtime_update(unsigned long ticks)
2050{
2051 write_seqlock(&jiffies_lock);
2052 do_timer(ticks);
2053 write_sequnlock(&jiffies_lock);
2054 update_wall_time();
2055}
2056