linux/arch/x86/vdso/vclock_gettime.c
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime and gettimeofday.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
#include "vextern.h"

#define gtod vdso_vsyscall_gtod_data

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;
        asm("syscall" : "=a" (ret) :
            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
        return ret;
}
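
/*
 * Note on the fallback above: when the fast path cannot be used, the vDSO
 * issues the real clock_gettime() syscall itself.  In the x86-64 syscall
 * ABI the syscall number goes in rax (the "0"/"=a" constraints), the first
 * argument in rdi ("D") and the second in rsi ("S"); "memory" is clobbered
 * because the kernel writes *ts behind the compiler's back.
 */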

notrace static inline long vgetns(void)
{
        long v;
        cycles_t (*vread)(void);
        vread = gtod->clock.vread;
        v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
        return (v * gtod->clock.mult) >> gtod->clock.shift;
}
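
/*
 * vgetns() applies the standard clocksource conversion: the cycles elapsed
 * since the last timekeeper update, masked to the counter width, are scaled
 * to nanoseconds with a fixed-point multiply:
 *
 *     ns = ((now - cycle_last) & mask) * mult >> shift
 *
 * mult and shift are maintained by the kernel timekeeping code so that
 * mult / 2^shift approximates the clock's nanoseconds-per-cycle ratio.
 */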

notrace static noinline int do_realtime(struct timespec *ts)
{
        unsigned long seq, ns;
        do {
                seq = read_seqbegin(&gtod->lock);
                ts->tv_sec = gtod->wall_time_sec;
                ts->tv_nsec = gtod->wall_time_nsec;
                ns = vgetns();
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        timespec_add_ns(ts, ns);
        return 0;
}
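
/*
 * The do/while loop above is the reader side of a seqlock: read_seqbegin()
 * samples the sequence counter, the time fields are copied, and
 * read_seqretry() rechecks the counter.  If the kernel's timekeeper updated
 * gtod concurrently, the counter differs (or a write was in progress) and
 * the copy is retried, so the reader never blocks and never sees a torn
 * update.
 */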

/* Copy of the version in kernel/time.c which we cannot directly access */
notrace static void
vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}

notrace static noinline int do_monotonic(struct timespec *ts)
{
        unsigned long seq, ns, secs;
        do {
                seq = read_seqbegin(&gtod->lock);
                secs = gtod->wall_time_sec;
                ns = gtod->wall_time_nsec + vgetns();
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        vset_normalized_timespec(ts, secs, ns);
        return 0;
}
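
/*
 * CLOCK_MONOTONIC is derived from wall time plus the wall_to_monotonic
 * offset sampled under the same seqlock; the sum can leave tv_nsec outside
 * [0, NSEC_PER_SEC), hence the normalization step at the end.
 */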

notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
        unsigned long seq;
        do {
                seq = read_seqbegin(&gtod->lock);
                ts->tv_sec = gtod->wall_time_coarse.tv_sec;
                ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        return 0;
}
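
/*
 * The _COARSE variants never touch the hardware counter: they return the
 * snapshot the kernel took at the last timer tick, trading resolution
 * (roughly one jiffy) for an even cheaper read, and they work even when no
 * vread-capable clocksource (TSC/HPET) is available.
 */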

notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
        unsigned long seq, ns, secs;
        do {
                seq = read_seqbegin(&gtod->lock);
                secs = gtod->wall_time_coarse.tv_sec;
                ns = gtod->wall_time_coarse.tv_nsec;
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        vset_normalized_timespec(ts, secs, ns);
        return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        if (likely(gtod->sysctl_enabled))
                switch (clock) {
                case CLOCK_REALTIME:
                        if (likely(gtod->clock.vread))
                                return do_realtime(ts);
                        break;
                case CLOCK_MONOTONIC:
                        if (likely(gtod->clock.vread))
                                return do_monotonic(ts);
                        break;
                case CLOCK_REALTIME_COARSE:
                        return do_realtime_coarse(ts);
                case CLOCK_MONOTONIC_COARSE:
                        return do_monotonic_coarse(ts);
                }
        return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
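
/*
 * The weak alias exports the function under the unprefixed name as well, so
 * a libc (or a program resolving symbols from the vDSO image directly) can
 * bind either clock_gettime or __vdso_clock_gettime to this code.
 */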

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        long ret;
        if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
                if (likely(tv != NULL)) {
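                        /*
                         * struct timeval and struct timespec share the same
                         * layout (the BUILD_BUG_ON below enforces it), so
                         * do_realtime() can fill the timeval as if it were a
                         * timespec; the nanoseconds written into tv_usec are
                         * then converted to microseconds in place.
                         */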
                        BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
                                     offsetof(struct timespec, tv_nsec) ||
                                     sizeof(*tv) != sizeof(struct timespec));
                        do_realtime((struct timespec *)tv);
                        tv->tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
                        tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
                }
                return 0;
        }
        asm("syscall" : "=a" (ret) :
            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
        return ret;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));
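
/*
 * A minimal userspace sketch, assuming a kernel that exposes this vDSO and a
 * libc that resolves the calls through it: the portable calls below end up
 * in __vdso_clock_gettime() and __vdso_gettimeofday() on the fast path,
 * without entering the kernel.
 *
 *     #include <stdio.h>
 *     #include <time.h>
 *     #include <sys/time.h>
 *
 *     int main(void)
 *     {
 *             struct timespec ts;
 *             struct timeval tv;
 *
 *             clock_gettime(CLOCK_MONOTONIC, &ts);
 *             gettimeofday(&tv, NULL);
 *             printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *             return 0;
 *     }
 */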