linux/kernel/time/time.c
/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *                             adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02    Philip Gladstone
 *      Created file with time related functions from sched/core.c and adjtimex()
 * 1993-10-08    Torsten Duwe
 *      adjtime interface update and CMOS clock write code
 * 1995-08-13    Torsten Duwe
 *      kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16    Ulrich Windl
 *      Introduced error checking for many cases in adjtimex().
 *      Updated NTP code according to technical memorandum Jan '96
 *      "A Kernel Model for Precision Timekeeping" by Dave Mills
 *      Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *      (Even though the technical memorandum forbids it)
 * 2004-07-14    Christoph Lameter
 *      Added getnstimeofday to allow the posix timer functions to return
 *      with nanosecond accuracy
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>

#include <generated/timeconst.h>
#include "timekeeping.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
        time_t i = get_seconds();

        if (tloc) {
                if (put_user(i,tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */

SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME

/* compat_time_t is a 32 bit "long" and needs to get converted. */
COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
{
        struct timeval tv;
        compat_time_t i;

        do_gettimeofday(&tv);
        i = tv.tv_sec;

        if (tloc) {
                if (put_user(i,tloc))
                        return -EFAULT;
        }
        force_successful_syscall_return();
        return i;
}

COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
{
        struct timespec64 tv;
        int err;

        if (get_user(tv.tv_sec, tptr))
                return -EFAULT;

        tv.tv_nsec = 0;

        err = security_settime64(&tv, NULL);
        if (err)
                return err;

        do_settimeofday64(&tv);
        return 0;
}

#endif /* __ARCH_WANT_COMPAT_SYS_TIME */
#endif

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        if (likely(tv != NULL)) {
                struct timeval ktv;
                do_gettimeofday(&ktv);
                if (copy_to_user(tv, &ktv, sizeof(ktv)))
                        return -EFAULT;
        }
        if (unlikely(tz != NULL)) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }
        return 0;
}

/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time. Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones. This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right. Otherwise,
 * various programs will get confused when the clock gets warped.
 */

int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz)
{
        static int firsttime = 1;
        int error = 0;

        if (tv && !timespec64_valid(tv))
                return -EINVAL;

        error = security_settime64(tv, tz);
        if (error)
                return error;

        if (tz) {
                /* Verify we're within the +-15 hrs range */
                if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
                        return -EINVAL;

                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
                        firsttime = 0;
                        if (!tv)
                                timekeeping_warp_clock();
                }
        }
        if (tv)
                return do_settimeofday64(tv);
        return 0;
}
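
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the warp described above is triggered by the first settimeofday() call
 * that supplies only a timezone, typically from an early boot script:
 *
 *      struct timezone tz = { .tz_minuteswest = -60, .tz_dsttime = 0 };
 *      settimeofday(NULL, &tz);
 *
 * That call reaches do_sys_settimeofday64() with tv == NULL and tz != NULL,
 * so sys_tz is stored and timekeeping_warp_clock() runs exactly once to
 * shift a CMOS clock that was keeping local time onto UTC.
 */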

SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
                struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;

                if (!timeval_valid(&user_tv))
                        return -EINVAL;

                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        if (tv) {
                struct timeval ktv;

                do_gettimeofday(&ktv);
                if (compat_put_timeval(&ktv, tv))
                        return -EFAULT;
        }
        if (tz) {
                if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
                        return -EFAULT;
        }

        return 0;
}

COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
                       struct timezone __user *, tz)
{
        struct timespec64 new_ts;
        struct timeval user_tv;
        struct timezone new_tz;

        if (tv) {
                if (compat_get_timeval(&user_tv, tv))
                        return -EFAULT;
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
        if (tz) {
                if (copy_from_user(&new_tz, tz, sizeof(*tz)))
                        return -EFAULT;
        }

        return do_sys_settimeofday64(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}
#endif

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
        struct timex txc;               /* Local copy of parameter */
        int ret;

        /* Copy the user data space into the kernel copy
         * structure. But bear in mind that the structures
         * may change
         */
        if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
                return -EFAULT;
        ret = do_adjtimex(&txc);
        return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE1(adjtimex, struct compat_timex __user *, utp)
{
        struct timex txc;
        int err, ret;

        err = compat_get_timex(&txc, utp);
        if (err)
                return err;

        ret = do_adjtimex(&txc);

        err = compat_put_timex(utp, &txc);
        if (err)
                return err;

        return ret;
}
#endif

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
        return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
               HZ_TO_MSEC_SHR32;
# else
        return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
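
/*
 * Worked example (editor's addition): with the common HZ values the first
 * branch applies because MSEC_PER_SEC (1000) is an exact multiple of HZ:
 *
 *      HZ == 1000:  jiffies_to_msecs(250) == (1000 / 1000) * 250 ==  250 ms
 *      HZ ==  250:  jiffies_to_msecs(250) == (1000 /  250) * 250 == 1000 ms
 *
 * Only unusual HZ values (e.g. HZ == 300) fall through to the scaled
 * multiply built from <generated/timeconst.h>.
 */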

unsigned int jiffies_to_usecs(const unsigned long j)
{
        /*
         * HZ usually doesn't go much beyond MSEC_PER_SEC.
         * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
         */
        BUILD_BUG_ON(HZ > USEC_PER_SEC);

#if !(USEC_PER_SEC % HZ)
        return (USEC_PER_SEC / HZ) * j;
#else
# if BITS_PER_LONG == 32
        return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
        return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. Always rounds down. gran must
 * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
        /* Avoid division in the common cases 1 ns and 1 s. */
        if (gran == 1) {
                /* nothing */
        } else if (gran == NSEC_PER_SEC) {
                t.tv_nsec = 0;
        } else if (gran > 1 && gran < NSEC_PER_SEC) {
                t.tv_nsec -= t.tv_nsec % gran;
        } else {
                WARN(1, "illegal file time granularity: %u", gran);
        }
        return t;
}
EXPORT_SYMBOL(timespec_trunc);
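
/*
 * Worked example (editor's addition): truncating to a 1 ms filesystem
 * timestamp granularity, i.e. gran == 1000000:
 *
 *      t.tv_nsec == 123456789  ->  123456789 - (123456789 % 1000000)
 *                              ==  123000000
 *
 * gran == 1 returns t unchanged, and gran == NSEC_PER_SEC clears tv_nsec.
 */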

/*
 * mktime64 - Converts date to seconds.
 * Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * A leap second can be indicated by calling this function with sec as
 * 60 (allowable under ISO 8601).  The leap second is treated the same
 * as the following second since they don't exist in UNIX time.
 *
 * An encoding of midnight at the end of the day as 24:00:00 - i.e. midnight
 * tomorrow - (allowable under ISO 8601) is supported.
 */
time64_t mktime64(const unsigned int year0, const unsigned int mon0,
                const unsigned int day, const unsigned int hour,
                const unsigned int min, const unsigned int sec)
{
        unsigned int mon = mon0, year = year0;

        /* 1..12 -> 11,12,1..10 */
        if (0 >= (int) (mon -= 2)) {
                mon += 12;      /* Puts Feb last since it has leap day */
                year -= 1;
        }

        return ((((time64_t)
                  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
                  year*365 - 719499
            )*24 + hour /* now have hours - midnight tomorrow handled here */
          )*60 + min /* now have minutes */
        )*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime64);
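
/*
 * Worked example (editor's addition) for mktime64(2000, 1, 1, 0, 0, 0):
 * January maps to mon == 11 of year == 1999, so the day count is
 *
 *      1999/4 - 1999/100 + 1999/400 + 367*11/12 + 1 + 1999*365 - 719499
 *   == 499 - 19 + 4 + 336 + 1 + 729635 - 719499 == 10957 days
 *
 * and 10957 * 24 * 60 * 60 == 946684800, the expected epoch value for
 * 2000-01-01 00:00:00 UTC.
 */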

/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
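
/*
 * Worked example (editor's addition): the loops above carry whole seconds
 * in either direction until tv_nsec lands in [0, NSEC_PER_SEC):
 *
 *      sec == 5, nsec == -200000000  ->  { .tv_sec = 4, .tv_nsec = 800000000 }
 *      sec == 1, nsec == 1500000000  ->  { .tv_sec = 2, .tv_nsec = 500000000 }
 */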

/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
        struct timespec ts;
        s32 rem;

        if (!nsec)
                return (struct timespec) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
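
/*
 * Worked example (editor's addition): div_s64_rem() truncates towards zero,
 * so the negative-remainder fixup above keeps tv_nsec non-negative:
 *
 *      ns_to_timespec(1500000000)  ->  { .tv_sec =  1, .tv_nsec = 500000000 }
 *      ns_to_timespec(-1)          ->  { .tv_sec = -1, .tv_nsec = 999999999 }
 */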

/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
        struct timespec ts = ns_to_timespec(nsec);
        struct timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
        struct timespec64 ts = ns_to_timespec64(nsec);
        struct __kernel_old_timeval tv;

        tv.tv_sec = ts.tv_sec;
        tv.tv_usec = (suseconds_t)ts.tv_nsec / 1000;

        return tv;
}
EXPORT_SYMBOL(ns_to_kernel_old_timeval);
/**
 * set_normalized_timespec64 - set timespec64 sec and nsec parts and normalize
 *
 * @ts:         pointer to timespec64 variable to be set
 * @sec:        seconds to set
 * @nsec:       nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec64 variable and
 * normalize to the timespec64 storage format
 *
 * Note: The tv_nsec part is always in the range of
 *      0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                /*
                 * The following asm() prevents the compiler from
                 * optimising this loop into a modulo operation. See
                 * also __iter_div_u64_rem() in include/linux/time.h
                 */
                asm("" : "+rm"(nsec));
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                asm("" : "+rm"(nsec));
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec64);

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
struct timespec64 ns_to_timespec64(const s64 nsec)
{
        struct timespec64 ts;
        s32 rem;

        if (!nsec)
                return (struct timespec64) {0, 0};

        ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
        if (unlikely(rem < 0)) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = rem;

        return ts;
}
EXPORT_SYMBOL(ns_to_timespec64);

/**
 * msecs_to_jiffies: - convert milliseconds to jiffies
 * @m:  time in milliseconds
 *
 * The conversion is done as follows:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it by a factor, while
 *   handling any 32-bit overflows.
 *   For the details see __msecs_to_jiffies().
 *
 * msecs_to_jiffies() checks whether the passed-in value is a constant
 * via __builtin_constant_p(), allowing gcc to eliminate most of the
 * code; __msecs_to_jiffies() is called if the value passed does not
 * allow constant folding and the actual conversion must be done at
 * runtime.
 * The _msecs_to_jiffies helpers are the HZ-dependent conversion
 * routines found in include/linux/jiffies.h.
 */
unsigned long __msecs_to_jiffies(const unsigned int m)
{
        /*
         * A negative value means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
        return _msecs_to_jiffies(m);
}
EXPORT_SYMBOL(__msecs_to_jiffies);
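
/*
 * Usage sketch (editor's addition, hypothetical driver code): callers use
 * msecs_to_jiffies() from <linux/jiffies.h>, which constant-folds literal
 * arguments and only calls __msecs_to_jiffies() for runtime values:
 *
 *      unsigned long a = msecs_to_jiffies(20);       folded at compile time
 *      unsigned long b = msecs_to_jiffies(user_ms);  runtime conversion
 *      unsigned long c = msecs_to_jiffies(-1);       MAX_JIFFY_OFFSET,
 *                                                    i.e. "wait forever"
 */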

unsigned long __usecs_to_jiffies(const unsigned int u)
{
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
        return _usecs_to_jiffies(u);
}
EXPORT_SYMBOL(__usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 * Note that due to the small error in the multiplier here, this
 * rounding is incorrect for sufficiently large values of tv_nsec, but
 * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're
 * OK.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
static unsigned long
__timespec64_to_jiffies(u64 sec, long nsec)
{
        nsec = nsec + TICK_NSEC - 1;

        if (sec >= MAX_SEC_IN_JIFFIES) {
                sec = MAX_SEC_IN_JIFFIES;
                nsec = 0;
        }
        return ((sec * SEC_CONVERSION) +
                (((u64)nsec * NSEC_CONVERSION) >>
                 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;

}

static unsigned long
__timespec_to_jiffies(unsigned long sec, long nsec)
{
        return __timespec64_to_jiffies((u64)sec, nsec);
}

unsigned long
timespec64_to_jiffies(const struct timespec64 *value)
{
        return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
}
EXPORT_SYMBOL(timespec64_to_jiffies);
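
/*
 * Worked example (editor's addition): the "+ TICK_NSEC - 1" above rounds up
 * to the next tick, so any non-zero remainder still costs a full jiffy:
 *
 *      { .tv_sec = 0, .tv_nsec = 1 }  ->  at least 1 jiffy
 *      { .tv_sec = 1, .tv_nsec = 0 }  ->  HZ jiffies
 */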

void
jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;
        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec64);

/*
 * We could use a similar algorithm to timespec_to_jiffies (with a
 * different multiplier for usec instead of nsec). But this has a
 * problem with rounding: we can't exactly add TICK_NSEC - 1 to the
 * usec value, since it's not necessarily integral.
 *
 * We could instead round in the intermediate scaled representation
 * (i.e. in units of 1/2^(large scale) jiffies) but that's also
 * perilous: the scaling introduces a small positive error, which
 * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1
 * units to the intermediate before shifting) leads to accidental
 * overflow and overestimates.
 *
 * At the cost of one additional multiplication by a constant, just
 * use the timespec implementation.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
        return __timespec_to_jiffies(value->tv_sec,
                                     value->tv_usec * NSEC_PER_USEC);
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
        /*
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
        u32 rem;

        value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
                                    NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        return x * (USER_HZ / HZ);
# else
        return x / (HZ / USER_HZ);
# endif
#else
        return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
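
/*
 * Worked example (editor's addition): with HZ == 250 and USER_HZ == 100,
 * TICK_NSEC is 4000000, so the generic branch is used and one second of
 * jiffies maps to USER_HZ clock ticks:
 *
 *      jiffies_to_clock_t(250) == div_u64(250 * 4000000, 10000000) == 100
 */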

unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ) == 0
        if (x >= ~0UL / (HZ / USER_HZ))
                return ~0UL;
        return x * (HZ / USER_HZ);
#else
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;

        /* .. but do try to contain it here */
        return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
        x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
        x = div_u64(x, HZ / USER_HZ);
# else
        /* Nothing to do */
# endif
#else
        /*
         * There are better ways that don't overflow early,
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
        x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
        return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
        return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
        return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
         * overflow after 64.99 years.
         * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
         */
        return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
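
/*
 * Worked example (editor's addition): with USER_HZ == 100 the first branch
 * applies, since NSEC_PER_SEC is an exact multiple of USER_HZ:
 *
 *      nsec_to_clock_t(1500000000) == div_u64(1500000000, 10000000) == 150
 */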

u64 jiffies64_to_nsecs(u64 j)
{
#if !(NSEC_PER_SEC % HZ)
        return (NSEC_PER_SEC / HZ) * j;
#else
        return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
#endif
}
EXPORT_SYMBOL(jiffies64_to_nsecs);

/**
 * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
u64 nsecs_to_jiffies64(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
        /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
        return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
        /* overflow after 292 years if HZ = 1024 */
        return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
        /*
         * Generic case - optimized for cases where HZ is a multiple of 3.
         * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
         */
        return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
EXPORT_SYMBOL(nsecs_to_jiffies64);
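
/*
 * Worked example (editor's addition): with HZ == 1000 the common-case branch
 * divides by NSEC_PER_SEC / HZ == 1000000, truncating partial ticks:
 *
 *      nsecs_to_jiffies64(2500000)  ==  2 jiffies
 *      nsecs_to_jiffies64(999999)   ==  0 jiffies
 */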

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:  nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
        return (unsigned long)nsecs_to_jiffies64(n);
}
EXPORT_SYMBOL_GPL(nsecs_to_jiffies);

/*
 * Add two timespec64 values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0).
 * And, each timespec64 is in normalized form.
 */
struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                const struct timespec64 rhs)
{
        struct timespec64 res;

        set_normalized_timespec64(&res, (timeu64_t) lhs.tv_sec + rhs.tv_sec,
                        lhs.tv_nsec + rhs.tv_nsec);

        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
                res.tv_sec = TIME64_MAX;
                res.tv_nsec = 0;
        }

        return res;
}
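
/*
 * Worked example (editor's addition): the overflow check above saturates
 * instead of wrapping, so adding to a value near the end of the time64_t
 * range clamps the result:
 *
 *      { TIME64_MAX - 1, 0 } + { 10, 0 }  ->  { TIME64_MAX, 0 }
 *
 * while an ordinary sum is normalized as usual:
 *
 *      { 1, 800000000 } + { 2, 700000000 }  ->  { 4, 500000000 }
 */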

int get_timespec64(struct timespec64 *ts,
                   const struct __kernel_timespec __user *uts)
{
        struct __kernel_timespec kts;
        int ret;

        ret = copy_from_user(&kts, uts, sizeof(kts));
        if (ret)
                return -EFAULT;

        ts->tv_sec = kts.tv_sec;

        /* Zero out the padding for 32 bit systems or in compat mode */
        if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()))
                kts.tv_nsec &= 0xFFFFFFFFUL;

        ts->tv_nsec = kts.tv_nsec;

        return 0;
}
EXPORT_SYMBOL_GPL(get_timespec64);

int put_timespec64(const struct timespec64 *ts,
                   struct __kernel_timespec __user *uts)
{
        struct __kernel_timespec kts = {
                .tv_sec = ts->tv_sec,
                .tv_nsec = ts->tv_nsec
        };

        return copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL_GPL(put_timespec64);

int __compat_get_timespec64(struct timespec64 *ts64,
                                   const struct compat_timespec __user *cts)
{
        struct compat_timespec ts;
        int ret;

        ret = copy_from_user(&ts, cts, sizeof(ts));
        if (ret)
                return -EFAULT;

        ts64->tv_sec = ts.tv_sec;
        ts64->tv_nsec = ts.tv_nsec;

        return 0;
}

int __compat_put_timespec64(const struct timespec64 *ts64,
                                   struct compat_timespec __user *cts)
{
        struct compat_timespec ts = {
                .tv_sec = ts64->tv_sec,
                .tv_nsec = ts64->tv_nsec
        };
        return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}

int compat_get_timespec64(struct timespec64 *ts, const void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_get_timespec64(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_get_timespec64);

int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
{
        if (COMPAT_USE_64BIT_TIME)
                return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
        else
                return __compat_put_timespec64(ts, uts);
}
EXPORT_SYMBOL_GPL(compat_put_timespec64);

int get_itimerspec64(struct itimerspec64 *it,
                        const struct itimerspec __user *uit)
{
        int ret;

        ret = get_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = get_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(get_itimerspec64);

int put_itimerspec64(const struct itimerspec64 *it,
                        struct itimerspec __user *uit)
{
        int ret;

        ret = put_timespec64(&it->it_interval, &uit->it_interval);
        if (ret)
                return ret;

        ret = put_timespec64(&it->it_value, &uit->it_value);

        return ret;
}
EXPORT_SYMBOL_GPL(put_itimerspec64);