/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions.
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */

#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readability of the timekeeper is kept in a
 * single cache line, protected by the sequence count below.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;
static struct tk_fast tk_fast_raw  ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
		tk->xtime_sec++;
	}
}

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
	tk_normalize_xtime(tk);
}
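
/*
 * Worked example (illustrative, not part of the original source): with a
 * clocksource shift of 10, tkr_mono.xtime_nsec stores nanoseconds scaled
 * by 2^10. Storing ts->tv_nsec = 500000 via tk_set_xtime() yields
 * xtime_nsec = 500000 << 10 = 512000000, and tk_xtime() recovers
 * 512000000 >> 10 = 500000. The extra shift bits preserve sub-nanosecond
 * precision across the NTP frequency adjustments applied further below.
 */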

static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offs_real = -wall_to_monotonic
	 * before modifying anything.
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */

static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{

	cycle_t max_cycles = tk->tkr_mono.clock->max_cycles;
	const char *name = tk->tkr_mono.clock->name;

	if (offset > max_cycles) {
		printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
				offset, name, max_cycles);
		printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
	} else {
		if (offset > (max_cycles >> 1)) {
			printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
					offset, name, max_cycles >> 1);
			printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
		}
	}

	if (tk->underflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->underflow_seen = 0;
	}

	if (tk->overflow_seen) {
		if (jiffies - tk->last_warning > WARNING_FREQ) {
			printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
			printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
			printk_deferred("         Your kernel is probably still fine.\n");
			tk->last_warning = jiffies;
		}
		tk->overflow_seen = 0;
	}
}

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	cycle_t now, last, mask, max, delta;
	unsigned int seq;

	/*
	 * Since we're called holding a seqlock, the data may shift
	 * under us while we're doing the calculation. This can cause
	 * false positives, since we'd note a problem but throw the
	 * results away. So nest another seqlock here to atomically
	 * grab the points we are checking with.
	 */
	do {
		seq = read_seqcount_begin(&tk_core.seq);
		now = tkr->read(tkr->clock);
		last = tkr->cycle_last;
		mask = tkr->mask;
		max = tkr->clock->max_cycles;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	delta = clocksource_delta(now, last, mask);

	/*
	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
	if (unlikely((~delta & mask) < (mask >> 3))) {
		tk->underflow_seen = 1;
		delta = 0;
	}

	/* Cap delta value to the max_cycles value to avoid mult overflows */
	if (unlikely(delta > max)) {
		tk->overflow_seen = 1;
		delta = tkr->clock->max_cycles;
	}

	return delta;
}
#else
static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
{
}
static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;

	/* read clocksource */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	return delta;
}
#endif

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->tkr_mono.clock;
	tk->tkr_mono.clock = clock;
	tk->tkr_mono.read = clock->read;
	tk->tkr_mono.mask = clock->mask;
	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);

	tk->tkr_raw.clock = clock;
	tk->tkr_raw.read = clock->read;
	tk->tkr_raw.mask = clock->mask;
	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr_mono.xtime_nsec >>= -shift_change;
		else
			tk->tkr_mono.xtime_nsec <<= shift_change;
	}
	tk->tkr_raw.xtime_nsec = 0;

	tk->tkr_mono.shift = clock->shift;
	tk->tkr_raw.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr_mono.mult = clock->mult;
	tk->tkr_raw.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
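
/*
 * Worked example (illustrative numbers, not from the original source):
 * assuming HZ=100, one NTP tick is NTP_INTERVAL_LENGTH = 10^7 ns. For a
 * 1 GHz clocksource with mult = 2^24 and shift = 24, the rounding
 * division above gives
 *	tmp = ((10^7 << 24) + mult/2) / mult ~= 10^7 cycles,
 * so tk->cycle_interval ~= 10^7 and
 * tk->xtime_interval = cycle_interval * mult ~= 10^7 << 24 shifted-ns,
 * with xtime_remainder absorbing the rounding error of the division.
 */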

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t delta;
	s64 nsec;

	delta = timekeeping_get_delta(tkr);

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in the arch time offset */
	return nsec + arch_gettimeoffset();
}
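
/*
 * Example conversion (illustrative numbers): with mult = 2^22 and
 * shift = 22, a delta of 1000 cycles on a 1 GHz clock yields
 *	nsec = (1000 * 2^22 + xtime_nsec) >> 22
 * i.e. ~1000 ns plus the already-accumulated shifted remainder: the
 * mult/shift pair acts as a fixed-point ns-per-cycle ratio of 1.0 here.
 */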

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tkr: Timekeeping readout base from which we take the update
 * @tkf: Pointer to tk_fast which is updated
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code in a debugfs or similar.
 *
 * Employ the latch technique: writes toggle the sequence count, and
 * readers select base[0] or base[1] from the lowest bit of the count.
 * So if an NMI hits the update of base[0] then it will use base[1],
 * which is still consistent. In the worst case this can result in a
 * slightly wrong timestamp (a few nanoseconds). See
 * @ktime_get_mono_fast_ns.
 */
static void update_fast_timekeeper(struct tk_read_base *tkr, struct tk_fast *tkf)
{
	struct tk_read_base *base = tkf->base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[0] */
	memcpy(base, tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tkf->seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * __ktime_get_fast_ns - NMI safe access to clock monotonic / raw
 *
 * The returned timestamp is not guaranteed to be monotonic across an
 * update of the timekeeper. It is calculated as:
 *
 *	now = base + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced onto the
 * not-yet-updated second array still use the old, steeper slope and may
 * observe a timestamp slightly ahead of readers that run after the
 * update completes. Such a CPU-local inversion can only be observed
 * when an NMI hits in the middle of the update; callers need to be
 * aware of that and deal with it.
 */
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount_latch(&tkf->seq);
		tkr = tkf->base + (seq & 0x01);
		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
	} while (read_seqcount_retry(&tkf->seq, seq));

	return now;
}

u64 ktime_get_mono_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_mono);
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);

u64 ktime_get_raw_fast_ns(void)
{
	return __ktime_get_fast_ns(&tk_fast_raw);
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
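
/*
 * Usage sketch (hypothetical caller, not part of this file): the fast
 * accessors are meant for contexts where entering the tk_core.seq
 * read-side could deadlock, e.g. NMI handlers or tracers instrumenting
 * the timekeeping code itself:
 *
 *	static void my_nmi_handler(void)	// hypothetical
 *	{
 *		u64 ts = ktime_get_mono_fast_ns();
 *		trace_printk("nmi at %llu ns\n", ts);
 *	}
 *
 * Callers must tolerate the documented caveat that a timestamp taken
 * inside an update may be slightly ahead of one taken just after it.
 */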

/* Suspend-time cycles value for the halted fast timekeeper */
static cycle_t cycles_at_suspend;

static cycle_t dummy_clock_read(struct clocksource *cs)
{
	return cycles_at_suspend;
}

/**
 * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
 * @tk: Timekeeper to snapshot.
 *
 * It generally is unsafe to access the clocksource after timekeeping has been
 * suspended, so take a snapshot of the readout base of @tk and use it as the
 * fast timekeeper's readout base while suspended.  It will return the same
 * number of cycles every time until timekeeping is resumed, at which time the
 * proper readout base for the fast timekeeper will be restored automatically.
 */
static void halt_fast_timekeeper(struct timekeeper *tk)
{
	static struct tk_read_base tkr_dummy;
	struct tk_read_base *tkr = &tk->tkr_mono;

	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	cycles_at_suspend = tkr->read(tkr->clock);
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);

	tkr = &tk->tkr_raw;
	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
	tkr_dummy.read = dummy_clock_read;
	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
			    tk->tkr_mono.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
	tk->tkr_mono.xtime_nsec -= remainder;
	tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
	tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
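
/*
 * Registration sketch (hypothetical module code, not part of this file):
 *
 *	static int my_gtod_notify(struct notifier_block *nb,
 *				  unsigned long was_set, void *priv)
 *	{
 *		struct timekeeper *tk = priv;	// timekeeper snapshot
 *		// push tk->xtime_sec etc. to a paravirt guest structure
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_gtod_nb = {
 *		.notifier_call = my_gtod_notify,
 *	};
 *	...
 *	pvclock_gtod_register_notifier(&my_gtod_nb);
 *
 * The chain is called under timekeeper_lock with interrupts disabled,
 * so the callback must not sleep; this mirrors how hypervisor code
 * keeps a paravirt clock in sync with the timekeeper.
 */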

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * tk_update_leap_state - helper to update the next_leap_ktime
 */
static inline void tk_update_leap_state(struct timekeeper *tk)
{
	tk->next_leap_ktime = ntp_get_next_leap();
	if (tk->next_leap_ktime.tv64 != KTIME_MAX)
		/* Convert to monotonic time */
		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
}

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	u64 seconds;
	u32 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);

	/* Update the monotonic raw base */
	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);

	/*
	 * The sum of the nanoseconds portions of xtime and
	 * wall_to_monotonic can be greater than a second already.
	 */
	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	if (nsec >= NSEC_PER_SEC)
		seconds++;
	tk->ktime_sec = seconds;
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_leap_state(tk);
	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);

	if (action & TK_CLOCK_WAS_SET)
		tk->clock_was_set_seq++;
	/*
	 * The mirroring of the data to the shadow-timekeeper needs
	 * to happen last here to ensure we don't over-write the
	 * timekeeper structure on the next update with stale data
	 */
	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));
}

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr_mono.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr_mono.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;

	/* If arch requires, add in the arch time offset */
	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
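
/*
 * Usage sketch (hypothetical caller): measuring an elapsed interval with
 * the NTP-adjusted monotonic clock:
 *
 *	ktime_t start = ktime_get();
 *	do_something();			// hypothetical work
 *	s64 delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
 *
 * Unlike CLOCK_REALTIME, this clock cannot jump backwards on
 * settimeofday(), which is why drivers prefer it for timeouts and
 * latency measurements.
 */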

u32 ktime_get_resolution_ns(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	u32 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return nsecs;
}
EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr_mono.base, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);
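
/*
 * Conversion sketch (hypothetical caller): translating a monotonic
 * timestamp into the boottime domain, which additionally counts time
 * spent in suspend:
 *
 *	ktime_t mono = ktime_get();
 *	ktime_t boot = ktime_mono_to_any(mono, TK_OFFS_BOOT);
 *
 * The same call with TK_OFFS_REAL or TK_OFFS_TAI yields CLOCK_REALTIME
 * and CLOCK_TAI values for the identical instant, all read under one
 * consistent snapshot of the offsets.
 */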

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr_raw.base;
		nsecs = timekeeping_get_ns(&tk->tkr_raw);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr_mono);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
 * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
 *
 * Returns the seconds portion of CLOCK_MONOTONIC with a single non
 * serialized read. tk->ktime_sec is of type 'unsigned long' so this
 * works on both 32 and 64 bit systems. On 32 bit systems the readout
 * covers ~136 years of uptime which should be enough to prevent
 * premature wrap arounds.
 */
time64_t ktime_get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	WARN_ON(timekeeping_suspended);
	return tk->ktime_sec;
}
EXPORT_SYMBOL_GPL(ktime_get_seconds);

/**
 * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
 *
 * Returns the wall clock seconds since 1970. This replaces the
 * get_seconds() interface which is not y2038 safe on 32bit systems.
 *
 * For 64bit systems the fast access to tk->xtime_sec is preserved. On
 * 32bit systems the access must be protected with the sequence
 * counter to provide "atomic" access to the 64bit tk->xtime_sec
 * value.
 */
time64_t ktime_get_real_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	time64_t seconds;
	unsigned int seq;

	if (IS_ENABLED(CONFIG_64BIT))
		return tk->xtime_sec;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		seconds = tk->xtime_sec;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return seconds;
}
EXPORT_SYMBOL_GPL(ktime_get_real_seconds);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts_raw = timespec64_to_timespec(tk->raw_time);
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw  = timekeeping_get_ns(&tk->tkr_raw);
		nsecs_real = timekeeping_get_ns(&tk->tkr_mono);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday64 - Sets the time of day.
 * @ts:		pointer to the timespec64 variable containing the new time
 *
 * Sets the time of day to the new time and updates NTP and notifies hrtimers
 */
int do_settimeofday64(const struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt;
	unsigned long flags;

	if (!timespec64_valid_strict(ts))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tk_set_xtime(tk, ts);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday64);
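
/*
 * Usage sketch (hypothetical caller, e.g. an RTC driver syncing system
 * time at boot; rtc_secs is a hypothetical variable):
 *
 *	struct timespec64 ts = { .tv_sec = rtc_secs, .tv_nsec = 0 };
 *	int err = do_settimeofday64(&ts);
 *	if (err)
 *		pr_warn("invalid time from RTC\n");
 *
 * Note that wall_to_monotonic absorbs the jump, so CLOCK_MONOTONIC
 * readers are unaffected while CLOCK_REALTIME moves to the new value.
 */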

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (!timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/*
 * __timekeeping_set_tai_offset - Lock free worker function
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr_mono.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr_mono.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr_mono.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic64 - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns(&tk->tkr_raw);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = ts64;
}
EXPORT_SYMBOL(getrawmonotonic64);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr_mono.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

void __weak read_persistent_clock64(struct timespec64 *ts64)
{
	struct timespec ts;

	read_persistent_clock(&ts);
	*ts64 = timespec_to_timespec64(ts);
}

/**
 * read_boot_clock64 -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock64(struct timespec64 *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/* Flag for if timekeeping_resume() has injected sleeptime */
static bool sleeptime_injected;

/* Flag for if there is a persistent clock on this platform */
static bool persistent_clock_exists;

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;

	read_persistent_clock64(&now);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exists = true;

	read_boot_clock64(&boot);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @tk:		the timekeeper to update
 * @delta:	pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
/**
 * We have three kinds of time sources to use for sleep time
 * injection, the preference order is:
 * 1) non-stop clocksource
 * 2) persistent clock (ie: RTC accessible when irqs are off)
 * 3) RTC
 *
 * 1) and 2) are used by timekeeping, 3) by the RTC subsystem.
 * If the system has neither 1) nor 2), 3) will be used finally.
 *
 * If timekeeping has injected sleeptime via either 1) or 2),
 * 3) becomes needless, so in this case we don't need to call
 * rtc_resume(), and this is what timekeeping_rtc_skipresume() means.
 */
bool timekeeping_rtc_skipresume(void)
{
	return sleeptime_injected;
}

/**
 * Whether to use 1) can be determined only when doing
 * timekeeping_resume(), which is invoked after rtc_suspend(),
 * so we can't skip rtc_suspend() just because the system has 1).
 *
 * But if the system has 2), 2) will definitely be used, so in this
 * case we don't need to call rtc_suspend(), and this is what
 * timekeeping_rtc_skipsuspend() means.
 */
bool timekeeping_rtc_skipsuspend(void)
{
	return persistent_clock_exists;
}

/**
 * timekeeping_inject_sleeptime64 - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec64 delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock64
 * because their RTC/persistent clock is only accessible when irqs are enabled,
 * and that also don't have an effective nonstop clocksource.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime64(struct timespec64 *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
#endif /* CONFIG_PM_SLEEP && CONFIG_RTC_HCTOSYS_DEVICE */

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 */
void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr_mono.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	cycle_t cycle_now, cycle_delta;

	sleeptime_injected = false;
	read_persistent_clock64(&ts_new);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After the system resumes, we need to calculate the suspended
	 * time and compensate the OS time for it. There are 3 sources
	 * that could be used: the nonstop clocksource during suspend,
	 * the persistent clock and the rtc device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no better
	 * usable source. The rtc part is handled separately in rtc core code.
	 */
	cycle_now = tk->tkr_mono.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr_mono.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last,
						tk->tkr_mono.mask);

		/*
		 * "cycle_delta * mult" may cause 64 bit overflow if the
		 * suspended time is too long. In that case we need to do
		 * the 64 bit math carefully.
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec64(nsec);
		sleeptime_injected = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		sleeptime_injected = true;
	}

	if (sleeptime_injected)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr_mono.cycle_last = cycle_now;
	tk->tkr_raw.cycle_last  = cycle_now;

	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	tick_resume();
	hrtimers_resume();
}

int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 delta, delta_delta;
	static struct timespec64 old_delta;

	read_persistent_clock64(&timekeeping_suspend_time);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exists flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exists = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	if (persistent_clock_exists) {
		/*
		 * To avoid drift caused by repeated suspend/resumes,
		 * which each can add ~1 second drift error,
		 * try to compensate so the difference in system time
		 * and persistent_clock time stays close to constant.
		 */
		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
		delta_delta = timespec64_sub(delta, old_delta);
		if (abs(delta_delta.tv_sec) >= 2) {
			/*
			 * if delta_delta is too large, assume time correction
			 * has occurred and set old_delta to the current delta.
			 */
			old_delta = delta;
		} else {
			/* Otherwise try to adjust old_system to compensate */
			timekeeping_suspend_time =
				timespec64_add(timekeeping_suspend_time, delta_delta);
		}
	}

	timekeeping_update(tk, TK_MIRROR);
	halt_fast_timekeeper(tk);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	tick_suspend();
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * To keep things simple, assume mult_adj == 1 for now; when
	 * mult_adj != 1, the interval and offset values have been
	 * scaled above so the math is the same.
	 *
	 * Incrementing the multiplier by one increments xtime_interval
	 * by one cycle_interval, since:
	 *	xtime_interval = cycle_interval * mult
	 * so:
	 *	cycle_interval * (mult + 1)
	 *		= (cycle_interval * mult) + cycle_interval
	 *
	 * offset holds the non-accumulated cycles, so the current time
	 * (in shifted nanoseconds) is:
	 *	now = (offset * mult) + xtime_nsec
	 * Even though we're adjusting the clock frequency, time must
	 * stay consistent: for the same offset value, the time must be
	 * the same both before and after the frequency adjustment:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * With adj_2 = adj_1 + 1 this reduces, after canceling the
	 * (offset * adj_1) terms, to:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * i.e.:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Document the ntp_error calculation.
	 */
	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
		/* NTP adjustment caused clocksource mult overflow */
		WARN_ON_ONCE(1);
		return;
	}

	tk->tkr_mono.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr_mono.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}
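
/*
 * Worked example (illustrative numbers): with cycle_interval = 10^7 and
 * mult_adj = +1 at adj_scale = 0, the update above performs
 *	mult           += 1
 *	xtime_interval += 10^7 shifted-ns per tick
 *	xtime_nsec     -= offset
 * so a reader computing (offset * mult + xtime_nsec) >> shift sees the
 * same instant immediately before and after the frequency change, as
 * required by the derivation in the comment above.
 */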

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
						   s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	s64 tick_error;
	bool negative;
	u32 adj;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if it is small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* Sort out the magnitude of the correction */
	tick_error = abs(tick_error);
	for (adj = 0; tick_error > interval; adj++)
		tick_error >>= 1;

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj);
}

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr_mono.clock->maxadj &&
		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
			> tk->tkr_mono.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it is possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we have already accumulated the second, we cannot simply
	 * roll it back, since the NTP subsystem has been notified via
	 * second_overflow(). So instead we push xtime_nsec forward by the
	 * amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr_mono.xtime_nsec;
		tk->tkr_mono.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
	unsigned int clock_set = 0;

	while (tk->tkr_mono.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr_mono.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it is a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log)
 * accumulation loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
					u32 shift,
					unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr_mono.cycle_last += interval;
	tk->tkr_raw.cycle_last  += interval;

	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
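
/*
 * Worked example (illustrative): if 70 cycle_intervals worth of cycles
 * are pending after a long NO_HZ idle, update_wall_time() below starts
 * with shift = ilog2(70) = 6 and consumes chunks of 64 (shift 6), 4
 * (shift 2) and 2 (shift 1) intervals, i.e. O(log n) calls into this
 * function instead of 70 single-interval accumulations.
 */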

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/* Do some additional sanity checking */
	timekeeping_check_update(real_tk, offset);

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);
	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	timekeeping_update(tk, clock_set);
	memcpy(real_tk, tk, sizeof(*tk));
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call this _after_ dropping the lock */
		clock_was_set_delayed();
}

/**
 * getboottime64 - Return the real time of system boot.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the wall-time of boot in a timespec64.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec64(t);
}
EXPORT_SYMBOL_GPL(getboottime64);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return timespec64_to_timespec(now);
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec64 get_monotonic_coarse64(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				  now.tv_nsec + mono.tv_nsec);

	return now;
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @cwsseq:	pointer to check and store the clock was set sequence number
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets if the
 * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
 * different.
 *
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
				     ktime_t *offs_boot, ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr_mono.base;
		nsecs = timekeeping_get_ns(&tk->tkr_mono);
		base = ktime_add_ns(base, nsecs);

		if (*cwsseq != tk->clock_was_set_seq) {
			*cwsseq = tk->clock_was_set_seq;
			*offs_real = tk->offs_real;
			*offs_boot = tk->offs_boot;
			*offs_tai = tk->offs_tai;
		}

		/* Handle leapsecond insertion adjustments */
		if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return base;
}

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	tk_update_leap_state(tk);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled !
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}