/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions.
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
#include <linux/compiler.h>

#include "tick-internal.h"
#include "ntp_internal.h"
#include "timekeeping_internal.h"

#define TK_CLEAR_NTP		(1 << 0)
#define TK_MIRROR		(1 << 1)
#define TK_CLOCK_WAS_SET	(1 << 2)

/*
 * The most important data for readout fits into a single 64 byte
 * cache line.
 */
static struct {
	seqcount_t		seq;
	struct timekeeper	timekeeper;
} tk_core ____cacheline_aligned;

static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;

/**
 * struct tk_fast - NMI safe timekeeper
 * @seq:	Sequence counter for protecting updates. The lowest bit
 *		is the index for the tk_read_base array
 * @base:	tk_read_base array. Access is indexed by the lowest bit of
 *		@seq.
 *
 * See @update_fast_timekeeper() below.
 */
struct tk_fast {
	seqcount_t		seq;
	struct tk_read_base	base[2];
};

static struct tk_fast tk_fast_mono ____cacheline_aligned;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* Flag for if there is a persistent clock on this platform */
bool __read_mostly persistent_clock_exist = false;

static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) {
		tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift;
		tk->xtime_sec++;
	}
}
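
/*
 * Note: tkr.xtime_nsec holds nanoseconds left-shifted by tkr.shift so
 * that sub-nanosecond precision survives NTP adjustments. With
 * shift = 8, for instance, one nanosecond is stored as 256 and a full
 * second as NSEC_PER_SEC << 8. tk_normalize_xtime() above carries any
 * whole seconds accumulated in that fixed-point field into xtime_sec.
 */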

static inline struct timespec64 tk_xtime(struct timekeeper *tk)
{
	struct timespec64 ts;

	ts.tv_sec = tk->xtime_sec;
	ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift);
	return ts;
}

static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift;
	tk_normalize_xtime(tk);
}
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
{
	struct timespec64 tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec64_to_ktime(tmp);
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
}

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
	tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @tk:		The target timekeeper to setup.
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->tkr.clock;
	tk->tkr.clock = clock;
	tk->tkr.read = clock->read;
	tk->tkr.mask = clock->mask;
	tk->tkr.cycle_last = tk->tkr.read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->tkr.xtime_nsec >>= -shift_change;
		else
			tk->tkr.xtime_nsec <<= shift_change;
	}
	tk->tkr.shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->tkr.mult = clock->mult;
	tk->ntp_err_mult = 0;
}
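
/*
 * Worked example (illustrative numbers, not from any particular
 * clocksource): with mult = 1 << 22 and shift = 22 one cycle equals
 * exactly 1 ns (a 1 GHz clock). At HZ = 1000, NTP_INTERVAL_LENGTH is
 * 1000000 ns, so tk_setup_internals() derives cycle_interval =
 * 1000000 cycles and xtime_interval = 1000000 << 22 shifted-ns,
 * i.e. 1 ms of wall time accumulated per NTP interval.
 */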

/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
static u32 default_arch_gettimeoffset(void) { return 0; }
u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
#else
static inline u32 arch_gettimeoffset(void) { return 0; }
#endif

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
	cycle_t cycle_now, delta;
	s64 nsec;

	/* read clocksource: */
	cycle_now = tkr->read(tkr->clock);

	/* calculate the delta since the last update_wall_time: */
	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);

	nsec = delta * tkr->mult + tkr->xtime_nsec;
	nsec >>= tkr->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}
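
/*
 * The readout above is the standard clocksource conversion
 *	nsec = (delta * mult + xtime_nsec) >> shift
 * where xtime_nsec supplies the shifted-ns remainder already
 * accumulated by update_wall_time(), so the result is the nanosecond
 * part of the current time relative to xtime_sec.
 */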

static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	/* read clocksource: */
	cycle_now = tk->tkr.read(clock);

	/* calculate the delta since the last update_wall_time: */
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + arch_gettimeoffset();
}

/**
 * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper.
 * @tk:		The timekeeper from which we take the update
 *
 * We want to use this from any context including NMI and tracing /
 * instrumenting the timekeeping code itself.
 *
 * So we handle this differently than the other timekeeping accessor
 * functions which retry when the sequence count has changed. The
 * update side does:
 *
 *	raw_write_seqcount_latch(&tk_fast_mono.seq);
 *	update(base[0], tk);
 *	raw_write_seqcount_latch(&tk_fast_mono.seq);
 *	update(base[1], tk);
 *
 * The reader side samples the sequence count and uses its lowest bit
 * as the index into the base[] array. While base[0] is being updated,
 * readers are forced off to base[1], which is still consistent; once
 * base[0] is updated the sequence count redirects readers back to it
 * and base[1] is brought up to date in turn.
 *
 * So if an NMI hits the update of base[0], it will use base[1] which
 * is still consistent. A reader interrupted in the middle of an
 * update therefore always observes a coherent snapshot, at the price
 * that timestamps taken across an update are not guaranteed to be
 * strictly monotonic (see ktime_get_mono_fast_ns() below).
 */
static void update_fast_timekeeper(struct timekeeper *tk)
{
	struct tk_read_base *base = tk_fast_mono.base;

	/* Force readers off to base[1] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* Update base[0] */
	memcpy(base, &tk->tkr, sizeof(*base));

	/* Force readers back to base[0] */
	raw_write_seqcount_latch(&tk_fast_mono.seq);

	/* Update base[1] */
	memcpy(base + 1, base, sizeof(*base));
}

/**
 * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic
 *
 * This timestamp is not guaranteed to be monotonic across an update.
 * The timestamp is calculated by:
 *
 *	now = base_mono + clock_delta * slope
 *
 * So if the update lowers the slope, readers who are forced to the
 * not yet updated second array are still using the old steeper slope
 * and can observe a timestamp slightly ahead of readers already using
 * the new slope, i.e. time can appear to go backwards between two
 * consecutive reads taken across the update.
 *
 * While other CPUs are likely to be able to observe that, the only way
 * for a CPU local observation is when an NMI hits in the middle of
 * the update. Timestamps taken from that NMI context might be ahead
 * of the following timestamps. Callers need to be aware of that and
 * deal with it.
 */
u64 notrace ktime_get_mono_fast_ns(void)
{
	struct tk_read_base *tkr;
	unsigned int seq;
	u64 now;

	do {
		seq = raw_read_seqcount(&tk_fast_mono.seq);
		tkr = tk_fast_mono.base + (seq & 0x01);
		now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr);

	} while (read_seqcount_retry(&tk_fast_mono.seq, seq));
	return now;
}
EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns);
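
/*
 * Usage sketch (hypothetical caller): code running in NMI or tracing
 * context, which must not spin on the timekeeper seqcount, can
 * timestamp events with
 *
 *	u64 ts = ktime_get_mono_fast_ns();
 *
 * accepting the minimal non-monotonicity across updates described
 * above.
 */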

#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD

static inline void update_vsyscall(struct timekeeper *tk)
{
	struct timespec xt, wm;

	xt = timespec64_to_timespec(tk_xtime(tk));
	wm = timespec64_to_timespec(tk->wall_to_monotonic);
	update_vsyscall_old(&xt, &wm, tk->tkr.clock, tk->tkr.mult,
			    tk->tkr.cycle_last);
}

static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * its value up to the next multiple of 1 << shift, so the old
	 * vsyscall implementations, which truncate to full nanoseconds,
	 * never see time go backwards. The sub-ns remainder dropped
	 * here is fed back into ntp_error, so no time is lost over the
	 * long run.
	 */
	remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1);
	tk->tkr.xtime_nsec -= remainder;
	tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
	tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif

static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
}

/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	update_pvclock_gtod(tk, true);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);

/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);

/*
 * Update the ktime_t based scalar nsec members of the timekeeper
 */
static inline void tk_update_ktime_data(struct timekeeper *tk)
{
	s64 nsec;

	/*
	 * The xtime based monotonic readout is:
	 *	nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now();
	 * The ktime based monotonic readout is:
	 *	nsec = base_mono + now();
	 * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec
	 */
	nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
	nsec *= NSEC_PER_SEC;
	nsec += tk->wall_to_monotonic.tv_nsec;
	tk->tkr.base_mono = ns_to_ktime(nsec);

	/* Update the monotonic raw base */
	tk->base_raw = timespec64_to_ktime(tk->raw_time);
}

/* must hold timekeeper_lock */
static void timekeeping_update(struct timekeeper *tk, unsigned int action)
{
	if (action & TK_CLEAR_NTP) {
		tk->ntp_error = 0;
		ntp_clear();
	}

	tk_update_ktime_data(tk);

	update_vsyscall(tk);
	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);

	if (action & TK_MIRROR)
		memcpy(&shadow_timekeeper, &tk_core.timekeeper,
		       sizeof(tk_core.timekeeper));

	update_fast_timekeeper(tk);
}
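
/*
 * Note the ordering above: the ktime base values are recomputed first,
 * then the vsyscall and pvclock copies are pushed out, then the shadow
 * timekeeper is optionally re-synced, and only at the end is the
 * NMI-safe fast timekeeper latched over to the new state.
 */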

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	struct clocksource *clock = tk->tkr.clock;
	cycle_t cycle_now, delta;
	s64 nsec;

	cycle_now = tk->tkr.read(clock);
	delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
	tk->tkr.cycle_last = cycle_now;

	tk->tkr.xtime_nsec += delta * tk->tkr.mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
	timespec64_add_ns(&tk->raw_time, nsec);
}

/**
 * __getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec to be set
 *
 * Updates the time of day in the timespec.
 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
 */
int __getnstimeofday64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(&tk->tkr);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsecs);

	/*
	 * Do not bail out early, in case there were callers still using
	 * the value, even in the face of the WARN_ON.
	 */
	if (unlikely(timekeeping_suspended))
		return -EAGAIN;
	return 0;
}
EXPORT_SYMBOL(__getnstimeofday64);

/**
 * getnstimeofday64 - Returns the time of day in a timespec64.
 * @ts:		pointer to the timespec64 to be set
 *
 * Returns the time of day in a timespec64 (WARN if suspended).
 */
void getnstimeofday64(struct timespec64 *ts)
{
	WARN_ON(__getnstimeofday64(ts));
}
EXPORT_SYMBOL(getnstimeofday64);

ktime_t ktime_get(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->tkr.base_mono;
		nsecs = timekeeping_get_ns(&tk->tkr);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

static ktime_t *offsets[TK_OFFS_MAX] = {
	[TK_OFFS_REAL]	= &tk_core.timekeeper.offs_real,
	[TK_OFFS_BOOT]	= &tk_core.timekeeper.offs_boot,
	[TK_OFFS_TAI]	= &tk_core.timekeeper.offs_tai,
};
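
/*
 * The offsets[] table lets ktime_get_with_offset() translate the
 * monotonic base into CLOCK_REALTIME (offs_real), CLOCK_BOOTTIME
 * (offs_boot) or CLOCK_TAI (offs_tai) with a single ktime_add().
 */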

ktime_t ktime_get_with_offset(enum tk_offsets offs)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base, *offset = offsets[offs];
	s64 nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = ktime_add(tk->tkr.base_mono, *offset);
		nsecs = timekeeping_get_ns(&tk->tkr);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_with_offset);

/**
 * ktime_mono_to_any() - convert monotonic time to any other time
 * @tmono:	time to convert.
 * @offs:	which offset to use
 */
ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs)
{
	ktime_t *offset = offsets[offs];
	unsigned long seq;
	ktime_t tconv;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		tconv = ktime_add(tmono, *offset);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return tconv;
}
EXPORT_SYMBOL_GPL(ktime_mono_to_any);

/**
 * ktime_get_raw - Returns the raw monotonic time in ktime_t format
 */
ktime_t ktime_get_raw(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		base = tk->base_raw;
		nsecs = timekeeping_get_ns_raw(tk);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_raw);

/**
 * ktime_get_ts64 - get the monotonic clock in timespec64 format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec64 format in the variable pointed to by @ts.
 */
void ktime_get_ts64(struct timespec64 *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(&tk->tkr);
		tomono = tk->wall_to_monotonic;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec64_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		*ts_raw = timespec64_to_timespec(tk->raw_time);
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(&tk->tkr);

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec64 now;

	getnstimeofday64(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts_delta, xt, tmp;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));

	tmp = timespec_to_timespec64(*tv);
	tk_set_xtime(tk, &tmp);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
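
/*
 * Note that do_settimeofday() adjusts wall_to_monotonic by the
 * opposite of the applied delta, so CLOCK_MONOTONIC readers observe
 * no jump; only the realtime and TAI views of time move.
 */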

/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts64, tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	ts64 = timespec_to_timespec64(*ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec64_add(tk_xtime(tk), ts64);
	if (!timespec64_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, &ts64);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);

/**
 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
 *
 */
s32 timekeeping_get_tai_offset(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	s32 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		ret = tk->tai_offset;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * __timekeeping_set_tai_offset - Lock free worker function
 *
 */
static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
{
	tk->tai_offset = tai_offset;
	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
}

/**
 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
 *
 */
void timekeeping_set_tai_offset(s32 tai_offset)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	__timekeeping_set_tai_offset(tk, tai_offset);
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	clock_was_set();
}

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);
	/*
	 * If the cs is in module, get a module reference. Succeeds
	 * for built-in code (owner == NULL) as well.
	 */
	if (try_module_get(new->owner)) {
		if (!new->enable || new->enable(new) == 0) {
			old = tk->tkr.clock;
			tk_setup_internals(tk, new);
			if (old->disable)
				old->disable(old);
			module_put(old->owner);
		} else {
			module_put(new->owner);
		}
	}
	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	return 0;
}
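
/*
 * change_clocksource() runs via stop_machine() (see timekeeping_notify()
 * below), so no other CPU can be in the middle of reading the timekeeper
 * while the clocksource and its conversion factors are being swapped.
 */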

/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
int timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	if (tk->tkr.clock == clock)
		return 0;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
	return tk->tkr.clock == clock ? 0 : -1;
}

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 ts64;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		nsecs = timekeeping_get_ns_raw(tk);
		ts64 = tk->raw_time;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	timespec64_add_ns(&ts64, nsecs);
	*ts = timespec64_to_timespec(ts64);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		ret = tk->tkr.clock->max_idle_ns;

	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __weak read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec64 now, boot, tmp;
	struct timespec ts;

	read_persistent_clock(&ts);
	now = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	} else if (now.tv_sec || now.tv_nsec)
		persistent_clock_exist = true;

	read_boot_clock(&ts);
	boot = timespec_to_timespec64(ts);
	if (!timespec64_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	tk->base_raw.tv64 = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	timekeeping_update(tk, TK_MIRROR);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}

/* time in seconds when suspend began for persistent clock */
static struct timespec64 timekeeping_suspend_time;

/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
					   struct timespec64 *delta)
{
	if (!timespec64_valid_strict(delta)) {
		printk_deferred(KERN_WARNING
				"__timekeeping_inject_sleeptime: Invalid "
				"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
	tk_debug_account_sleep_time(delta);
}

/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 tmp;
	unsigned long flags;

	/*
	 * Make sure we don't set the clock twice, as timekeeping_resume()
	 * already did it
	 */
	if (has_persistent_clock())
		return;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	timekeeping_forward_now(tk);

	tmp = timespec_to_timespec64(*delta);
	__timekeeping_inject_sleeptime(tk, &tmp);

	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct clocksource *clock = tk->tkr.clock;
	unsigned long flags;
	struct timespec64 ts_new, ts_delta;
	struct timespec tmp;
	cycle_t cycle_now, cycle_delta;
	bool suspendtime_found = false;

	read_persistent_clock(&tmp);
	ts_new = timespec_to_timespec64(tmp);

	clockevents_resume();
	clocksource_resume();

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	/*
	 * After the system resumes, we need to calculate the suspended
	 * time and compensate for it in the OS time. There are 3 sources
	 * that could be used: the nonstop clocksource during suspend,
	 * the persistent clock and the rtc device.
	 *
	 * One specific platform may have 1 or 2 or all of them, and the
	 * preference will be:
	 *	suspend-nonstop clocksource -> persistent clock -> rtc
	 * The less preferred source will only be tried if there is no
	 * better usable source.
	 */
	cycle_now = tk->tkr.read(clock);
	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
		cycle_now > tk->tkr.cycle_last) {
		u64 num, max = ULLONG_MAX;
		u32 mult = clock->mult;
		u32 shift = clock->shift;
		s64 nsec = 0;

		cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last,
						tk->tkr.mask);

		/*
		 * "cycle_delta * mult" may cause a 64 bit overflow if the
		 * suspended time is long enough. In that case split the
		 * conversion into max-sized chunks and do the math
		 * carefully.
		 */
		do_div(max, mult);
		if (cycle_delta > max) {
			num = div64_u64(cycle_delta, max);
			nsec = (((u64) max * mult) >> shift) * num;
			cycle_delta -= num * max;
		}
		nsec += ((u64) cycle_delta * mult) >> shift;

		ts_delta = ns_to_timespec64(nsec);
		suspendtime_found = true;
	} else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
		ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
		suspendtime_found = true;
	}

	if (suspendtime_found)
		__timekeeping_inject_sleeptime(tk, &ts_delta);

	/* Re-base the last cycle value */
	tk->tkr.cycle_last = cycle_now;
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}

static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 delta, delta_delta;
	static struct timespec64 old_delta;
	struct timespec tmp;

	read_persistent_clock(&tmp);
	timekeeping_suspend_time = timespec_to_timespec64(tmp);

	/*
	 * On some systems the persistent_clock can not be detected at
	 * timekeeping_init by its return value, so if we see a valid
	 * value returned, update the persistent_clock_exist flag.
	 */
	if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
		persistent_clock_exist = true;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec64_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec64_add(timekeeping_suspend_time, delta_delta);
	}

	timekeeping_update(tk, TK_MIRROR);
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}
device_initcall(timekeeping_init_ops);

/*
 * Apply a multiplier adjustment to the timekeeper
 */
static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
							 s64 offset,
							 bool negative,
							 int adj_scale)
{
	s64 interval = tk->cycle_interval;
	s32 mult_adj = 1;

	if (negative) {
		mult_adj = -mult_adj;
		interval = -interval;
		offset = -offset;
	}
	mult_adj <<= adj_scale;
	interval <<= adj_scale;
	offset <<= adj_scale;

	/*
	 * To keep things simple, assume mult_adj == 1 for now; when
	 * mult_adj != 1 the interval and offset values have been scaled
	 * accordingly above, so the math is the same.
	 *
	 * Incrementing the multiplier by one increments xtime_interval
	 * by one cycle_interval, since
	 *	xtime_interval = cycle_interval * mult
	 * and therefore
	 *	cycle_interval * (mult + 1)
	 *		= (cycle_interval * mult) + cycle_interval.
	 *
	 * "offset" holds the not yet accumulated cycles, so the current
	 * time in shifted nanoseconds is
	 *	now = (offset * mult) + xtime_nsec
	 * To keep "now" unchanged across the multiplier bump we must
	 * subtract "offset" from xtime_nsec:
	 *	(offset * (mult + 1)) + (xtime_nsec - offset)
	 *		== (offset * mult) + xtime_nsec
	 * The error this deliberately introduces is charged to
	 * ntp_error and paid back over the following intervals.
	 */
	tk->tkr.mult += mult_adj;
	tk->xtime_interval += interval;
	tk->tkr.xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
}
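
/*
 * Example of the scaled path: timekeeping_freqadjust() below may call
 * this with adj_scale = 3, which applies a single mult step of
 * 1 << 3 = 8 while scaling interval and offset by the same factor, so
 * one call has the effect of eight unit adjustments.
 */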

/*
 * Calculate the multiplier adjustment needed to match the frequency
 * specified by NTP
 */
static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
						   s64 offset)
{
	s64 interval = tk->cycle_interval;
	s64 xinterval = tk->xtime_interval;
	s64 tick_error;
	bool negative;
	u32 adj;

	/* Remove any current error adj from freq calculation */
	if (tk->ntp_err_mult)
		xinterval -= tk->cycle_interval;

	tk->ntp_tick = ntp_tick_length();

	/* Calculate current error per tick */
	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
	tick_error -= (xinterval + tk->xtime_remainder);

	/* Don't worry about correcting it if its small */
	if (likely((tick_error >= 0) && (tick_error <= interval)))
		return;

	/* preserve the direction of correction */
	negative = (tick_error < 0);

	/* Sort out the magnitude of the correction */
	tick_error = abs(tick_error);
	for (adj = 0; tick_error > interval; adj++)
		tick_error >>= 1;

	/* scale the corrections */
	timekeeping_apply_adjustment(tk, offset, negative, adj);
}
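
/*
 * The loop above picks the largest power-of-two scale whose correction
 * still exceeds one cycle interval: a tick error of ~6 intervals, for
 * example, halves to 3, 1.5 and 0.75 intervals, leaving adj = 3 and a
 * single 2^3 = 8x scaled multiplier step instead of eight unit steps.
 */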

/*
 * Adjust the timekeeper's multiplier to the correct frequency
 * and also to reduce the accumulated error value.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	/* Correct for the current frequency error */
	timekeeping_freqadjust(tk, offset);

	/* Next make a small adjustment to fix any cumulative error */
	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
		tk->ntp_err_mult = 1;
		timekeeping_apply_adjustment(tk, offset, 0, 0);
	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
		/* Undo any existing error adjustment */
		timekeeping_apply_adjustment(tk, offset, 1, 0);
		tk->ntp_err_mult = 0;
	}

	if (unlikely(tk->tkr.clock->maxadj &&
		(tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->tkr.clock->name, (long)tk->tkr.mult,
			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
	}

	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, its possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->tkr.xtime_nsec < 0)) {
		s64 neg = -(s64)tk->tkr.xtime_nsec;
		tk->tkr.xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}

/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 *
 */
static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift;
	unsigned int clock_set = 0;

	while (tk->tkr.xtime_nsec >= nsecps) {
		int leap;

		tk->tkr.xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if its a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec64 ts;

			tk->xtime_sec += leap;

			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec64_sub(tk->wall_to_monotonic, ts));

			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);

			clock_set = TK_CLOCK_WAS_SET;
		}
	}
	return clock_set;
}
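
/*
 * Leap second handling above: for an inserted leap second
 * second_overflow() returns -1, so xtime_sec steps back by one while
 * wall_to_monotonic and the TAI offset are re-derived to match. UTC
 * repeats a second, CLOCK_MONOTONIC stays continuous, and TAI-UTC
 * grows by one.
 */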

/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds. Allows for O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift,
						unsigned int *clock_set)
{
	cycle_t interval = tk->cycle_interval << shift;
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < interval)
		return offset;

	/* Accumulate one shifted interval */
	offset -= interval;
	tk->tkr.cycle_last += interval;

	tk->tkr.xtime_nsec += tk->xtime_interval << shift;
	*clock_set |= accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += tk->ntp_tick << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
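
/*
 * Example: with 70 tick intervals worth of cycles pending,
 * update_wall_time() below starts at shift = ilog2(70) = 6 (subject to
 * the maxshift cap) and consumes chunks of 64, then 4, then 2
 * intervals, i.e. O(log n) iterations instead of 70 single-interval
 * accumulations.
 */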

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
void update_wall_time(void)
{
	struct timekeeper *real_tk = &tk_core.timekeeper;
	struct timekeeper *tk = &shadow_timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned int clock_set = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = real_tk->cycle_interval;
#else
	offset = clocksource_delta(tk->tkr.read(tk->tkr.clock),
				   tk->tkr.cycle_last, tk->tkr.mask);
#endif

	/* Check if there's really nothing to do */
	if (offset < real_tk->cycle_interval)
		goto out;

	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
							&clock_set);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}

	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everybody converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	clock_set |= accumulate_nsecs_to_secs(tk);

	write_seqcount_begin(&tk_core.seq);

	/*
	 * Update the real timekeeper.
	 *
	 * We could avoid this memcpy by switching pointers, but that
	 * requires changes to all other timekeeper usage sites as
	 * well, i.e. move the timekeeper pointer getter into the
	 * spinlocked/seqcount protected sections. And we trade this
	 * memcpy under the tk_core.seq against one before we start
	 * updating.
	 */
	memcpy(real_tk, tk, sizeof(*tk));
	timekeeping_update(real_tk, clock_set);
	write_seqcount_end(&tk_core.seq);
out:
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
	if (clock_set)
		/* Have to call _delayed version, since in irq context */
		clock_was_set_delayed();
}

/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

	*ts = ktime_to_timespec(t);
}
EXPORT_SYMBOL_GPL(getboottime);

unsigned long get_seconds(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;

	return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return timespec64_to_timespec(now);
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	struct timespec64 now, mono;
	unsigned long seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);

	return timespec64_to_timespec(now);
}

/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	calc_global_load(ticks);
}

/**
 * ktime_get_update_offsets_tick - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns monotonic time at last tick and various offsets
 */
ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr.base_mono;
		nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift;

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
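
/*
 * Unlike ktime_get_update_offsets_now() below, the _tick variant does
 * not read the clocksource: it returns base_mono plus only the already
 * accumulated xtime_nsec remainder, i.e. the time of the last tick,
 * which is sufficient in low resolution mode.
 */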

#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets_now - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 * @offs_tai:	pointer to storage for monotonic -> clock tai offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
							ktime_t *offs_tai)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned int seq;
	ktime_t base;
	u64 nsecs;

	do {
		seq = read_seqcount_begin(&tk_core.seq);

		base = tk->tkr.base_mono;
		nsecs = timekeeping_get_ns(&tk->tkr);

		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
		*offs_tai = tk->offs_tai;
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return ktime_add_ns(base, nsecs);
}
#endif /* CONFIG_HIGH_RES_TIMERS */

/**
 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
 */
int do_adjtimex(struct timex *txc)
{
	struct timekeeper *tk = &tk_core.timekeeper;
	unsigned long flags;
	struct timespec64 ts;
	s32 orig_tai, tai;
	int ret;

	/* Validate the data before disabling interrupts */
	ret = ntp_validate_timex(txc);
	if (ret)
		return ret;

	if (txc->modes & ADJ_SETOFFSET) {
		struct timespec delta;
		delta.tv_sec  = txc->time.tv_sec;
		delta.tv_nsec = txc->time.tv_usec;
		if (!(txc->modes & ADJ_NANO))
			delta.tv_nsec *= 1000;
		ret = timekeeping_inject_offset(&delta);
		if (ret)
			return ret;
	}

	getnstimeofday64(&ts);

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	orig_tai = tai = tk->tai_offset;
	ret = __do_adjtimex(txc, &ts, &tai);

	if (tai != orig_tai) {
		__timekeeping_set_tai_offset(tk, tai);
		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
	}
	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

	if (tai != orig_tai)
		clock_was_set();

	ntp_notify_cmos_timer();

	return ret;
}

#ifdef CONFIG_NTP_PPS
/**
 * hardpps() - Accessor function to NTP __hardpps function
 */
void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timekeeper_lock, flags);
	write_seqcount_begin(&tk_core.seq);

	__hardpps(phase_ts, raw_ts);

	write_seqcount_end(&tk_core.seq);
	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
EXPORT_SYMBOL(hardpps);
#endif /* CONFIG_NTP_PPS */

/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks, that have elapsed since the last call.
 *
 * Must be called with interrupts disabled !
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}