linux/include/linux/time64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TIME64_H
#define _LINUX_TIME64_H

#include <linux/math64.h>
#include <vdso/time64.h>

typedef __s64 time64_t;
typedef __u64 timeu64_t;

#include <uapi/linux/time.h>

struct timespec64 {
        time64_t        tv_sec;                 /* seconds */
        long            tv_nsec;                /* nanoseconds */
};

struct itimerspec64 {
        struct timespec64 it_interval;
        struct timespec64 it_value;
};

/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX                      ((s64)~((u64)1 << 63))
#define TIME64_MIN                      (-TIME64_MAX - 1)

#define KTIME_MAX                       ((s64)~((u64)1 << 63))
#define KTIME_MIN                       (-KTIME_MAX - 1)
#define KTIME_SEC_MAX                   (KTIME_MAX / NSEC_PER_SEC)
#define KTIME_SEC_MIN                   (KTIME_MIN / NSEC_PER_SEC)

/*
 * Limits for settimeofday():
 *
 * To prevent setting the time close to the wraparound point, time setting
 * is limited so that a reasonable uptime can be accommodated. An uptime of
 * 30 years should really be sufficient, which means the cutoff is the year
 * 2232. At that point the cutoff is just a small part of the larger problem.
 */
#define TIME_UPTIME_SEC_MAX             (30LL * 365 * 24 * 3600)
#define TIME_SETTOD_SEC_MAX             (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
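
/*
 * Rough arithmetic behind those limits (approximate, ignoring leap days):
 *
 *   KTIME_SEC_MAX       = (2^63 - 1) / NSEC_PER_SEC  ~= 9223372036 s ~= year 2262
 *   TIME_UPTIME_SEC_MAX = 30 * 365 * 24 * 3600        =  946080000 s ~= 30 years
 *   TIME_SETTOD_SEC_MAX ~= 9223372036 s - 946080000 s ~= 8277292036 s ~= year 2232
 */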

static inline int timespec64_equal(const struct timespec64 *a,
                                   const struct timespec64 *b)
{
        return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
}

/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 */
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
{
        if (lhs->tv_sec < rhs->tv_sec)
                return -1;
        if (lhs->tv_sec > rhs->tv_sec)
                return 1;
        return lhs->tv_nsec - rhs->tv_nsec;
}
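
/*
 * Minimal usage sketch for timespec64_compare(): pick the later of two
 * timestamps. The example_ts_later() name is illustrative only and not
 * part of this header.
 */
static inline struct timespec64 example_ts_later(struct timespec64 a,
                                                 struct timespec64 b)
{
        return timespec64_compare(&a, &b) >= 0 ? a : b;
}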

extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);

static inline struct timespec64 timespec64_add(struct timespec64 lhs,
                                                struct timespec64 rhs)
{
        struct timespec64 ts_delta;
        set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
                                lhs.tv_nsec + rhs.tv_nsec);
        return ts_delta;
}

/*
 * sub = lhs - rhs, in normalized form
 */
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
                                                struct timespec64 rhs)
{
        struct timespec64 ts_delta;
        set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
                                lhs.tv_nsec - rhs.tv_nsec);
        return ts_delta;
}
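
/*
 * Minimal usage sketch for timespec64_sub(): compute the interval between a
 * start and an end timestamp. The example_ts_elapsed() name is illustrative
 * only and not part of this header.
 */
static inline struct timespec64 example_ts_elapsed(struct timespec64 start,
                                                   struct timespec64 end)
{
        /* set_normalized_timespec64() keeps 0 <= tv_nsec < NSEC_PER_SEC */
        return timespec64_sub(end, start);
}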

/*
 * Returns true if the timespec64 is normalized, false if denormalized:
 */
static inline bool timespec64_valid(const struct timespec64 *ts)
{
        /* Dates before 1970 are bogus */
        if (ts->tv_sec < 0)
                return false;
        /* Can't have more nanoseconds than a second */
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
                return false;
        return true;
}

static inline bool timespec64_valid_strict(const struct timespec64 *ts)
{
        if (!timespec64_valid(ts))
                return false;
        /* Disallow values that could overflow ktime_t */
        if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
                return false;
        return true;
}

static inline bool timespec64_valid_settod(const struct timespec64 *ts)
{
        if (!timespec64_valid(ts))
                return false;
        /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
        if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
                return false;
        return true;
}
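
/*
 * Minimal usage sketch for the validity helpers: values headed for
 * settimeofday()-like interfaces need the tighter timespec64_valid_settod()
 * check, while values only converted to ktime_t need
 * timespec64_valid_strict(). The example_ts_usable() name is illustrative
 * only and not part of this header.
 */
static inline bool example_ts_usable(const struct timespec64 *ts,
                                     bool for_settod)
{
        return for_settod ? timespec64_valid_settod(ts) :
                            timespec64_valid_strict(ts);
}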

/**
 * timespec64_to_ns - Convert timespec64 to nanoseconds
 * @ts:         pointer to the timespec64 variable to be converted
 *
 * Returns the scalar nanosecond representation of the timespec64
 * parameter.
 */
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
{
        /* Prevent multiplication overflow / underflow */
        if (ts->tv_sec >= KTIME_SEC_MAX)
                return KTIME_MAX;

        if (ts->tv_sec <= KTIME_SEC_MIN)
                return KTIME_MIN;

        return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}

/**
 * ns_to_timespec64 - Convert nanoseconds to timespec64
 * @nsec:       the nanoseconds value to be converted
 *
 * Returns the timespec64 representation of the nsec parameter.
 */
extern struct timespec64 ns_to_timespec64(const s64 nsec);
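
/*
 * Minimal usage sketch for the two conversions: go through a scalar
 * nanosecond value for arithmetic, then back to a timespec64. The
 * example_ts_halve() name is illustrative only and not part of this header.
 */
static inline struct timespec64 example_ts_halve(struct timespec64 ts)
{
        /* timespec64_to_ns() saturates at KTIME_MAX / KTIME_MIN */
        return ns_to_timespec64(timespec64_to_ns(&ts) / 2);
}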

/**
 * timespec64_add_ns - Adds nanoseconds to a timespec64
 * @a:          pointer to timespec64 to be incremented
 * @ns:         unsigned nanoseconds value to be added
 *
 * This must always be inlined because it's used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
{
        a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
        a->tv_nsec = ns;
}
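
/*
 * Minimal usage sketch for timespec64_add_ns(): advance a copy of a timestamp
 * by an unsigned nanosecond delta; the result stays normalized. The
 * example_ts_advance() name is illustrative only and not part of this header.
 */
static inline struct timespec64 example_ts_advance(struct timespec64 ts,
                                                   u64 delta_ns)
{
        timespec64_add_ns(&ts, delta_ns);
        return ts;
}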

/*
 * timespec64_add_safe assumes both values are positive and checks for
 * overflow. It will return a result with tv_sec clamped to TIME64_MAX in
 * case of overflow.
 */
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
                                         const struct timespec64 rhs);
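
/*
 * Minimal usage sketch for timespec64_add_safe(): build an absolute expiry
 * from a base time and a relative timeout; a saturated sum shows up as
 * tv_sec == TIME64_MAX. The example_ts_expiry() name is illustrative only
 * and not part of this header.
 */
static inline bool example_ts_expiry(const struct timespec64 *base,
                                     const struct timespec64 *timeout,
                                     struct timespec64 *expiry)
{
        *expiry = timespec64_add_safe(*base, *timeout);

        /* true when the addition did not saturate */
        return expiry->tv_sec != TIME64_MAX;
}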

#endif /* _LINUX_TIME64_H */