linux/kernel/time/time.c
/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *                             adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *      Introduced error checking for many cases in adjtimex().
 *      Updated NTP code according to technical memorandum Jan '96
 *      "A Kernel Model for Precision Timekeeping" by Dave Mills
 *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *      (Even though the technical memorandum forbids it)
 * 2004-07-14    Christoph Lameter
 *      Added getnstimeofday to allow the posix timer functions to return
 *      with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs who obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = get_seconds();

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}
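
/*
 * Illustrative userspace sketch (hypothetical helper, not kernel code): the
 * comment above notes that sys_time() could be emulated on top of
 * sys_gettimeofday(). A libc could do roughly this:
 *
 *      #include <sys/time.h>
 *      #include <time.h>
 *
 *      time_t emulated_time(time_t *tloc)      // hypothetical helper name
 *      {
 *              struct timeval tv;
 *
 *              if (gettimeofday(&tv, NULL) != 0)
 *                      return (time_t)-1;
 *              if (tloc)
 *                      *tloc = tv.tv_sec;
 *              return tv.tv_sec;
 *      }
 */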

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */

SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        struct timeval tv;
        compat_time_t i;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i, tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime(&tv, NULL);
        if (err)
                return err;

        do_settimeofday(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#endif

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;
                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}
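
/*
 * Illustrative userspace sketch (not kernel code): how the syscall above is
 * typically consumed. Both pointers may be NULL; the timezone returned is
 * whatever was last stored in sys_tz via settimeofday().
 *
 *      #include <stdio.h>
 *      #include <sys/time.h>
 *
 *      int main(void)
 *      {
 *              struct timeval tv;
 *              struct timezone tz;
 *
 *              if (gettimeofday(&tv, &tz) != 0)
 *                      return 1;
 *              printf("%lld.%06ld s since the Epoch, tz_minuteswest=%d\n",
 *                     (long long)tv.tv_sec, (long)tv.tv_usec,
 *                     tz.tz_minuteswest);
 *              return 0;
 *      }
 */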

/*
 * Indicates if there is an offset between the system clock and the hardware
 * clock/persistent clock/rtc.
 */
int persistent_clock_is_local;

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours)  or
 * compile in the timezone information into the kernel.  Bad, bad....
 *
 *                                              - TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it. This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
        if (sys_tz.tz_minuteswest != 0) {
                struct timespec adjust;

                persistent_clock_is_local = 1;
                adjust.tv_sec = sys_tz.tz_minuteswest * 60;
                adjust.tv_nsec = 0;
                timekeeping_inject_offset(&adjust);
        }
}

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec64_valid(tv))
                return -EINVAL;

        error = security_settime64(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* Verify we're within the +-15 hrs range */
                if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
                        return -EINVAL;

                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                warp_clock();
                }
        }
        if (tv)
                return do_settimeofday64(tv);
        return 0;
}
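
/*
 * Illustrative userspace sketch (hypothetical helper, not kernel code): the
 * "first call with a timezone but no time" case handled above is what lets
 * an early boot script tell the kernel that the RTC runs in local time:
 *
 *      #include <sys/time.h>
 *
 *      int declare_rtc_local(int minutes_west)         // hypothetical helper
 *      {
 *              struct timezone tz = {
 *                      .tz_minuteswest = minutes_west, // e.g. -60 for UTC+1
 *                      .tz_dsttime     = 0,
 *              };
 *
 *              // tv == NULL: only the timezone is set; on the very first
 *              // such call this triggers warp_clock() above.
 *              return settimeofday(NULL, &tz);
 *      }
 */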

SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;

                if (!timeval_valid(&user_tv))
                        return -EINVAL;

                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;               /* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}
#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1) / (HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
        return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
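
/*
 * Worked example (assuming HZ == 250, a common config): MSEC_PER_SEC is
 * evenly divisible by HZ, so the first branch applies and each jiffy is
 * 1000 / 250 = 4 ms:
 *
 *      jiffies_to_msecs(1)   == 4
 *      jiffies_to_msecs(250) == 1000
 *
 * With HZ == 1000 the factor is 1 and the conversion is the identity.
 */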

unsigned int jiffies_to_usecs(const unsigned long j)
{
        /*
         * HZ usually doesn't go much further than MSEC_PER_SEC.
         * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
         */
        BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /* Avoid division in the common cases 1 ns and 1 s. */
        if (gran == 1) {
                /* nothing */
        } else if (gran == NSEC_PER_SEC) {
                t.tv_nsec = 0;
        } else if (gran > 1 && gran < NSEC_PER_SEC) {
                t.tv_nsec -= t.tv_nsec % gran;
        } else {
                WARN(1, "illegal file time granularity: %u", gran);
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
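
/*
 * Worked example: truncating to a 1 ms granularity (as a filesystem with
 * sb->s_time_gran == 1000000 would request) keeps only whole milliseconds:
 *
 *      struct timespec t = { .tv_sec = 5, .tv_nsec = 123456789 };
 *
 *      t = timespec_trunc(t, 1000000);
 *      // t.tv_sec == 5, t.tv_nsec == 123000000
 */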

/*
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601).  The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
                const unsigned int day, const unsigned int hour,
                const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;      /* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((time64_t)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours - midnight tomorrow handled here */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
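
/*
 * Worked examples for the formula above:
 *
 *      mktime64(1970, 1, 2, 0, 0, 0)      == 86400
 *      mktime64(2000, 1, 1, 0, 0, 0)      == 946684800
 *      mktime64(1980, 12, 31, 23, 59, 59) == 347155199
 *
 * Note that months are 1-based and that no timezone or leap-second
 * correction is applied.
 */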

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
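
/*
 * Worked examples of the normalization above:
 *
 *      struct timespec ts;
 *
 *      set_normalized_timespec(&ts, 0, 1500000000);
 *      // ts == { .tv_sec = 1, .tv_nsec = 500000000 }
 *
 *      set_normalized_timespec(&ts, 5, -1);
 *      // ts == { .tv_sec = 4, .tv_nsec = 999999999 }
 */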

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
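
/*
 * Worked example: for negative inputs only tv_sec goes negative, matching
 * the convention documented for set_normalized_timespec():
 *
 *      ns_to_timespec(-1)         == { .tv_sec = -1, .tv_nsec = 999999999 }
 *      ns_to_timespec(1000000001) == { .tv_sec =  1, .tv_nsec = 1 }
 */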

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

#if BITS_PER_LONG == 32
/**
 * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec64 variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec64 variable and
 * normalize to the timespec64 storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
        struct timespec64 ts;
        s32 rem;

        if (!nsec)
                return (struct timespec64) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);
#endif
/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:  time in milliseconds
 *
 * conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor and
 *   handling any 32-bit overflows.
 *   for the details see __msecs_to_jiffies()
 *
 * msecs_to_jiffies() checks for the passed in value being a constant
 * via __builtin_constant_p() allowing gcc to eliminate most of the
 * code, __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * the _msecs_to_jiffies helpers are the HZ dependent conversion
 * routines found in include/linux/jiffies.h
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
        return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
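
/*
 * Worked example (assuming HZ == 1000, so one jiffy is one millisecond):
 *
 *      msecs_to_jiffies(20)               == 20
 *      msecs_to_jiffies((unsigned int)-5) == MAX_JIFFY_OFFSET  // "infinite"
 *
 * As the comment above notes, with a constant argument the
 * msecs_to_jiffies() wrapper in include/linux/jiffies.h folds the
 * conversion at compile time and this function is never called.
 */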

unsigned long __usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
        return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static unsigned long
__timespec64_to_jiffies(u64 sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return ((sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}

static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
        return __timespec64_to_jiffies((u64)sec, nsec);
}

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
        return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);

void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;
        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        return __timespec_to_jiffies(value->tv_sec,
                                     value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
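
/*
 * Worked example (assuming HZ == 250 and USER_HZ == 100, the value usually
 * exposed to userspace via sysconf(_SC_CLK_TCK)): TICK_NSEC is 4000000 and
 * NSEC_PER_SEC / USER_HZ is 10000000, so the divide-based fallback is used:
 *
 *      jiffies_to_clock_t(250) == div_u64(250ULL * 4000000, 10000000) == 100
 *
 * i.e. one second's worth of jiffies maps to USER_HZ clock ticks.
 */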

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
        return (NSEC_PER_SEC / HZ) * j;
#else
        return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0)
 */
struct timespec timespec_add_safe(const struct timespec lhs,
                                  const struct timespec rhs)
{
        struct timespec res;

        set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
                                lhs.tv_nsec + rhs.tv_nsec);

        if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
                res.tv_sec = TIME_T_MAX;

        return res;
}

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                const struct timespec64 rhs)
{
        struct timespec64 res;

        set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
                        lhs.tv_nsec + rhs.tv_nsec);

        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
                res.tv_sec = TIME64_MAX;
                res.tv_nsec = 0;
        }

        return res;
}
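
/*
 * Worked example of the overflow clamp above:
 *
 *      struct timespec64 a = { .tv_sec = TIME64_MAX - 1, .tv_nsec = 0 };
 *      struct timespec64 b = { .tv_sec = 10,             .tv_nsec = 0 };
 *      struct timespec64 r = timespec64_add_safe(a, b);
 *      // r == { .tv_sec = TIME64_MAX, .tv_nsec = 0 } instead of a wrapped,
 *      // seemingly small value.
 */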

int get_timespec64(struct timespec64 *ts,
                   const struct timespec __user *uts)
{
        struct timespec kts;
        int ret;

        ret = copy_from_user(&kts, uts, sizeof(kts));
        if (ret)
                return -EFAULT;

        ts->tv_sec = kts.tv_sec;
        ts->tv_nsec = kts.tv_nsec;

        return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

int put_timespec64(const struct timespec64 *ts,
                   struct timespec __user *uts)
{
        struct timespec kts = {
                .tv_sec = ts->tv_sec,
                .tv_nsec = ts->tv_nsec
        };
        return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

int get_itimerspec64(struct itimerspec64 *it,
                        const struct itimerspec __user *uit)
{
        int ret;

        ret = get_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = get_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
                        struct itimerspec __user *uit)
{
        int ret;

        ret = put_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = put_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
 951