linux/kernel/time/timekeeping.c
/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
        /* Current clocksource used for timekeeping. */
        struct clocksource *clock;
        /* The shift value of the current clocksource. */
        int     shift;

        /* Number of clock cycles in one NTP interval. */
        cycle_t cycle_interval;
        /* Number of clock-shifted nanoseconds in one NTP interval. */
        u64     xtime_interval;
        /* Raw nanoseconds accumulated per NTP interval. */
        u32     raw_interval;

        /* Clock-shifted nanosecond remainder not stored in xtime.tv_nsec. */
        u64     xtime_nsec;
        /* Difference between accumulated time and NTP time, in
         * NTP-shifted nanoseconds. */
        s64     ntp_error;
        /* Shift conversion between clock-shifted nanoseconds and
         * NTP-shifted nanoseconds. */
        int     ntp_error_shift;
        /* NTP-adjusted clock multiplier */
        u32     mult;
};

struct timekeeper timekeeper;

/**
 * timekeeper_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:              Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void timekeeper_setup_internals(struct clocksource *clock)
{
        cycle_t interval;
        u64 tmp;

        timekeeper.clock = clock;
        clock->cycle_last = clock->read(clock);

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = NTP_INTERVAL_LENGTH;
        tmp <<= clock->shift;
        tmp += clock->mult/2;
        do_div(tmp, clock->mult);
        if (tmp == 0)
                tmp = 1;

        interval = (cycle_t) tmp;
        timekeeper.cycle_interval = interval;

        /* Go back from cycles -> shifted ns */
        timekeeper.xtime_interval = (u64) interval * clock->mult;
        timekeeper.raw_interval =
                ((u64) interval * clock->mult) >> clock->shift;

        timekeeper.xtime_nsec = 0;
        timekeeper.shift = clock->shift;

        timekeeper.ntp_error = 0;
        timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

        /*
         * The timekeeper keeps its own mult value for the currently
         * active clocksource. This value will be adjusted via NTP
         * to counteract clock drift.
         */
        timekeeper.mult = clock->mult;
}
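
/*
 * Worked example for the setup above, with hypothetical numbers that
 * are not taken from any real clocksource: assume mult = 4194304 and
 * shift = 22 (exactly 1 ns per cycle), and NTP_INTERVAL_LENGTH =
 * 1000000 ns. Then:
 *
 *      cycle_interval = ((1000000 << 22) + mult/2) / mult = 1000000
 *      xtime_interval = 1000000 * 4194304     (shifted ns per interval)
 *      raw_interval   = xtime_interval >> 22  = 1000000 ns
 *
 * i.e. one NTP interval consumes 1000000 cycles and accumulates
 * exactly 1000000 ns per pass of the accumulation loop.
 */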

/* Timekeeper helper functions. */
static inline s64 timekeeping_get_ns(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* return the delta converted to nanoseconds, using the
         * NTP-adjusted mult. */
        return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                  timekeeper.shift);
}

static inline s64 timekeeping_get_ns_raw(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;

        /* read clocksource: */
        clock = timekeeper.clock;
        cycle_now = clock->read(clock);

        /* calculate the delta since the last update_wall_time: */
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

        /* return the delta converted to nanoseconds, using the
         * unadjusted clock mult. */
        return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
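
/*
 * Conversion note (illustrative, hypothetical numbers): the helpers
 * above rely on clocksource_cyc2ns(), which computes
 * (cycle_delta * mult) >> shift. With mult = 4194304 and shift = 22
 * (1 ns per cycle), a delta of 2500 cycles becomes
 * (2500 * 4194304) >> 22 = 2500 ns. Because timekeeper.mult is steered
 * by NTP while clock->mult is not, the two helpers can legitimately
 * return slightly different values for the same cycle delta.
 */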

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
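
/*
 * Readers of xtime and related state use the lockless seqlock read
 * pattern; a minimal sketch, mirroring the accessors below:
 *
 *      unsigned long seq;
 *      do {
 *              seq = read_seqbegin(&xtime_lock);
 *              ... copy xtime and friends ...
 *      } while (read_seqretry(&xtime_lock, seq));
 *
 * read_seqretry() reports whether a writer updated the protected data
 * while it was being copied; if so, the copy is simply retried.
 */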

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static struct timespec total_sleep_time;
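
/*
 * Worked example of the offset above (hypothetical values): if xtime
 * reads 1000.5 s and wall_to_monotonic is -1000 s, the monotonic clock
 * reads 0.5 s, since mono = xtime + wall_to_monotonic. Stepping the
 * wall clock forward by 10 s in do_settimeofday() also subtracts 10 s
 * from wall_to_monotonic, so the monotonic clock does not jump.
 */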

/*
 * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
 */
struct timespec raw_time;

/* flag indicating whether timekeeping is suspended */
int __read_mostly timekeeping_suspended;

static struct timespec xtime_cache __attribute__ ((aligned (16)));
void update_xtime_cache(u64 nsec)
{
        xtime_cache = xtime;
        timespec_add_ns(&xtime_cache, nsec);
}

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
        xtime.tv_sec += leapsecond;
        wall_to_monotonic.tv_sec -= leapsecond;
        update_vsyscall(&xtime, timekeeper.clock);
}

#ifdef CONFIG_GENERIC_TIME

/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(void)
{
        cycle_t cycle_now, cycle_delta;
        struct clocksource *clock;
        s64 nsec;

        clock = timekeeper.clock;
        cycle_now = clock->read(clock);
        cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
        clock->cycle_last = cycle_now;

        nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
                                  timekeeper.shift);

        /* If arch requires, add in gettimeoffset() */
        nsec += arch_gettimeoffset();

        timespec_add_ns(&xtime, nsec);

        nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
        timespec_add_ns(&raw_time, nsec);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);

                *ts = xtime;
                nsecs = timekeeping_get_ns();

                /* If arch requires, add in gettimeoffset() */
                nsecs += arch_gettimeoffset();

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        unsigned int seq;
        s64 secs, nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);
                secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
                nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
                nsecs += timekeeping_get_ns();

        } while (read_seqretry(&xtime_lock, seq));
        /*
         * Use ktime_set/ktime_add_ns to create a proper ktime on
         * 32-bit architectures without CONFIG_KTIME_SCALAR.
         */
        return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned int seq;
        s64 nsecs;

        WARN_ON(timekeeping_suspended);

        do {
                seq = read_seqbegin(&xtime_lock);
                *ts = xtime;
                tomono = wall_to_monotonic;
                nsecs = timekeeping_get_ns();

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec + nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:         pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
        struct timespec now;

        getnstimeofday(&now);
        tv->tv_sec = now.tv_sec;
        tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv:         pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
        struct timespec ts_delta;
        unsigned long flags;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        timekeeping_forward_now();

        ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
        ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
        wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

        xtime = *tv;

        update_xtime_cache(0);

        timekeeper.ntp_error = 0;
        ntp_clear();

        update_vsyscall(&xtime, timekeeper.clock);

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* signal hrtimers about time change */
        clock_was_set();

        return 0;
}
EXPORT_SYMBOL(do_settimeofday);
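
/*
 * Usage sketch (illustrative only, with an arbitrary example epoch):
 * stepping the clock to an absolute wall time. The tv_nsec range check
 * in do_settimeofday() rejects non-normalized values.
 *
 *      struct timespec ts = { .tv_sec = 1262304000, .tv_nsec = 0 };
 *
 *      if (do_settimeofday(&ts))
 *              printk(KERN_WARNING "settimeofday failed\n");
 */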

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates the current time interval and initializes the new clocksource
 */
static int change_clocksource(void *data)
{
        struct clocksource *new, *old;

        new = (struct clocksource *) data;

        timekeeping_forward_now();
        if (!new->enable || new->enable(new) == 0) {
                old = timekeeper.clock;
                timekeeper_setup_internals(new);
                if (old->disable)
                        old->disable(old);
        }
        return 0;
}

/**
 * timekeeping_notify - Install a new clock source
 * @clock:              pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
        if (timekeeper.clock == clock)
                return;
        stop_machine(change_clocksource, clock, NULL);
        tick_clock_notify();
}
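
/*
 * Note on the stop_machine() call above: change_clocksource() must not
 * run while other CPUs are sampling the old clocksource, so it is
 * executed with all other CPUs spinning with interrupts disabled. Once
 * it returns, every subsequent read goes through the new clocksource.
 */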

#else /* GENERIC_TIME */

static inline void timekeeping_forward_now(void) { }

/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:         pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
        struct timespec tomono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);
                getnstimeofday(ts);
                tomono = wall_to_monotonic;

        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
                                ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);

#endif /* !GENERIC_TIME */

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
        struct timespec now;

        getnstimeofday(&now);

        return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);

/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:         pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely unmodified by NTP)
 */
void getrawmonotonic(struct timespec *ts)
{
        unsigned long seq;
        s64 nsecs;

        do {
                seq = read_seqbegin(&xtime_lock);
                nsecs = timekeeping_get_ns_raw();
                *ts = raw_time;

        } while (read_seqretry(&xtime_lock, seq));

        timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);

/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
        unsigned long seq;
        int ret;

        do {
                seq = read_seqbegin(&xtime_lock);

                ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

        } while (read_seqretry(&xtime_lock, seq));

        return ret;
}

/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery-backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}

/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
        struct clocksource *clock;
        unsigned long flags;
        struct timespec now, boot;

        read_persistent_clock(&now);
        read_boot_clock(&boot);

        write_seqlock_irqsave(&xtime_lock, flags);

        ntp_init();

        clock = clocksource_default_clock();
        if (clock->enable)
                clock->enable(clock);
        timekeeper_setup_internals(clock);

        xtime.tv_sec = now.tv_sec;
        xtime.tv_nsec = now.tv_nsec;
        raw_time.tv_sec = 0;
        raw_time.tv_nsec = 0;
        if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
                boot.tv_sec = xtime.tv_sec;
                boot.tv_nsec = xtime.tv_nsec;
        }
        set_normalized_timespec(&wall_to_monotonic,
                                -boot.tv_sec, -boot.tv_nsec);
        update_xtime_cache(0);
        total_sleep_time.tv_sec = 0;
        total_sleep_time.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:        unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
        unsigned long flags;
        struct timespec ts;

        read_persistent_clock(&ts);

        clocksource_resume();

        write_seqlock_irqsave(&xtime_lock, flags);

        if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                ts = timespec_sub(ts, timekeeping_suspend_time);
                xtime = timespec_add_safe(xtime, ts);
                wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
                total_sleep_time = timespec_add_safe(total_sleep_time, ts);
        }
        update_xtime_cache(0);
        /* re-base the last cycle value */
        timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
        timekeeper.ntp_error = 0;
        timekeeping_suspended = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        touch_softlockup_watchdog();

        clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

        /* Resume hrtimers */
        hres_timers_resume();

        return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
        unsigned long flags;

        read_persistent_clock(&timekeeping_suspend_time);

        write_seqlock_irqsave(&xtime_lock, flags);
        timekeeping_forward_now();
        timekeeping_suspended = 1;
        write_sequnlock_irqrestore(&xtime_lock, flags);

        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

        return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
        .name           = "timekeeping",
        .resume         = timekeeping_resume,
        .suspend        = timekeeping_suspend,
};

static struct sys_device device_timer = {
        .id             = 0,
        .cls            = &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
        int error = sysdev_class_register(&timekeeping_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
                                                 s64 *offset)
{
        s64 tick_error, i;
        u32 look_ahead, adj;
        s32 error2, mult;

        /*
         * Use the current error value to determine how much to look ahead.
         * The larger the error the slower we adjust for it to avoid problems
         * with losing too many ticks, otherwise we would overadjust and
         * produce an even larger error.  The smaller the adjustment the
         * faster we try to adjust for it, as lost ticks can do less harm
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
        error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;

        /*
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
        tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
        tick_error -= timekeeper.xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;

        /* Finally calculate the adjustment shift value.  */
        i = *interval;
        mult = 1;
        if (error < 0) {
                error = -error;
                *interval = -*interval;
                *offset = -*offset;
                mult = -1;
        }
        for (adj = 0; error > i; adj++)
                error >>= 1;

        *interval <<= adj;
        *offset <<= adj;
        return mult << adj;
}
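
/*
 * Worked example for the final shift search above (hypothetical
 * numbers): if error = 8 * interval after the look-ahead correction,
 * the halving loop runs three times (8 -> 4 -> 2 -> 1 in units of
 * interval), so adj = 3 and the function returns mult << 3 = +/-8,
 * with *interval and *offset scaled up by the same 2^3 factor. A
 * large error is thus amortized in a few big steps instead of many
 * +/-1 adjustments.
 */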

/*
 * Adjust the multiplier to reduce the error value; this is optimized
 * for the most common adjustments of -1, 0 and 1. For other values
 * we do a bit more work.
 */
static void timekeeping_adjust(s64 offset)
{
        s64 error, interval = timekeeper.cycle_interval;
        int adj;

        error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
                        adj = 1;
                else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else if (error < -interval) {
                error >>= 2;
                if (likely(error >= -interval)) {
                        adj = -1;
                        interval = -interval;
                        offset = -offset;
                } else
                        adj = timekeeping_bigadjust(error, &interval, &offset);
        } else
                return;

        timekeeper.mult += adj;
        timekeeper.xtime_interval += interval;
        timekeeper.xtime_nsec -= offset;
        timekeeper.ntp_error -= (interval - offset) <<
                                timekeeper.ntp_error_shift;
}
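
/*
 * Magnitude note (illustrative numbers): bumping timekeeper.mult by 1
 * changes the reported rate by 2^-shift ns per cycle. With shift = 22
 * and a clocksource ticking at roughly 1 cycle per ns, that is about
 * 2^-22, i.e. ~0.24 ppm, so the common +/-1 adjustments steer the
 * clock very gently toward NTP time.
 */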

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt; must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
        struct clocksource *clock;
        cycle_t offset;
        u64 nsecs;

        /* Make sure we're fully resumed: */
        if (unlikely(timekeeping_suspended))
                return;

        clock = timekeeper.clock;
#ifdef CONFIG_GENERIC_TIME
        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
        offset = timekeeper.cycle_interval;
#endif
        timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;

        /* normally this loop will run just once, however in the
         * case of lost or late ticks, it will accumulate correctly.
         */
        while (offset >= timekeeper.cycle_interval) {
                u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;

                /* accumulate one interval */
                offset -= timekeeper.cycle_interval;
                clock->cycle_last += timekeeper.cycle_interval;

                timekeeper.xtime_nsec += timekeeper.xtime_interval;
                if (timekeeper.xtime_nsec >= nsecps) {
                        timekeeper.xtime_nsec -= nsecps;
                        xtime.tv_sec++;
                        second_overflow();
                }

                raw_time.tv_nsec += timekeeper.raw_interval;
                if (raw_time.tv_nsec >= NSEC_PER_SEC) {
                        raw_time.tv_nsec -= NSEC_PER_SEC;
                        raw_time.tv_sec++;
                }

                /* accumulate error between NTP and clock interval */
                timekeeper.ntp_error += tick_length;
                timekeeper.ntp_error -= timekeeper.xtime_interval <<
                                        timekeeper.ntp_error_shift;
        }

        /* correct the clock when NTP error is too big */
        timekeeping_adjust(offset);

        /*
         * Since in the loop above we accumulate any amount of time
         * in xtime_nsec over a second into xtime.tv_sec, it's possible for
         * xtime_nsec to be fairly small after the loop. Further, if we're
         * slightly speeding the clocksource up in timekeeping_adjust(),
         * it's possible the required corrective factor to xtime_nsec could
         * cause it to underflow.
         *
         * Now, we cannot simply roll the accumulated second back, since
         * the NTP subsystem has been notified via second_overflow. So
         * instead we push xtime_nsec forward by the amount we underflowed,
         * and add that amount into the error.
         *
         * We'll correct this error next time through this function, when
         * xtime_nsec is not as small.
         */
        if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
                s64 neg = -(s64)timekeeper.xtime_nsec;
                timekeeper.xtime_nsec = 0;
                timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
        }

        /* store full nanoseconds into xtime after rounding it up and
         * add the remainder to the error difference.
         */
        xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
        timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
        timekeeper.ntp_error += timekeeper.xtime_nsec <<
                                timekeeper.ntp_error_shift;

        nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
        update_xtime_cache(nsecs);

        /* update the vsyscall time data with the new xtime */
        update_vsyscall(&xtime, timekeeper.clock);
}
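
/*
 * Accumulation example (hypothetical numbers): with cycle_interval =
 * 1000000 and offset = 2500000 cycles since the last update, the loop
 * above runs twice, consuming 2000000 cycles and accumulating two
 * xtime_intervals; the remaining 500000 cycles stay in offset and are
 * folded into xtime_cache via clocksource_cyc2ns() after the loop.
 */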

/**
 * getboottime - Return the real time of system boot.
 * @ts:         pointer to the timespec to be set
 *
 * Returns the wall time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
        struct timespec boottime = {
                .tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
                .tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
        };

        set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
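
/*
 * Worked example (hypothetical values): suppose the system booted at
 * wall time 1000 s, so wall_to_monotonic started out at -1000 s, and a
 * later 50 s suspend shifted it to -1050 s while setting
 * total_sleep_time to 50 s. Then boottime = -1050 + 50 = -1000 s, and
 * negating it returns 1000 s: the wall time at boot.
 */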

/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:         pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
        *ts = timespec_add_safe(*ts, total_sleep_time);
}

unsigned long get_seconds(void)
{
        return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
        return xtime_cache;
}

struct timespec current_kernel_time(void)
{
        struct timespec now;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = xtime_cache;
        } while (read_seqretry(&xtime_lock, seq));

        return now;
}
EXPORT_SYMBOL(current_kernel_time);

struct timespec get_monotonic_coarse(void)
{
        struct timespec now, mono;
        unsigned long seq;

        do {
                seq = read_seqbegin(&xtime_lock);

                now = xtime_cache;
                mono = wall_to_monotonic;
        } while (read_seqretry(&xtime_lock, seq));

        set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
                                now.tv_nsec + mono.tv_nsec);
        return now;
}