linux/arch/x86/vdso/vclock_gettime.c
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
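
/*
 * Illustrative use (a sketch, not part of this file): libc normally
 * dispatches here, but the entry points can also be resolved by hand,
 * e.g. with glibc:
 *
 *	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
 *	int (*vclock_gettime)(clockid_t, struct timespec *) =
 *		dlsym(vdso, "__vdso_clock_gettime");
 *	struct timespec ts;
 *	vclock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * Whether dlopen() can see the vDSO this way depends on the C library;
 * the portable route is parsing AT_SYSINFO_EHDR from the aux vector.
 */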

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
#include <asm/pvclock.h>

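/*
 * All time data comes from vsyscall_gtod_data, which the kernel
 * exports read-only to every process through the vvar page; the vDSO
 * code only ever reads it.
 */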
#define gtod (&VVAR(vsyscall_gtod_data))

notrace static cycle_t vread_tsc(void)
{
        cycle_t ret;
        u64 last;

        /*
         * Empirically, a fence (of type that depends on the CPU)
         * before rdtsc is enough to ensure that rdtsc is ordered
         * with respect to loads.  The various CPU manuals are unclear
         * as to whether rdtsc can be reordered with later loads,
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
        ret = (cycle_t)vget_cycles();

        last = VVAR(vsyscall_gtod_data).clock.cycle_last;

        if (likely(ret >= last))
                return ret;

        /*
         * GCC likes to generate cmov here, but this branch is extremely
         * predictable (it's just a function of time and the likely is
         * very likely) and there's a data dependence, so force GCC
         * to generate a branch instead.  I don't barrier() because
         * we don't actually need a barrier, and if this function
         * ever gets inlined it will generate worse code.
         */
        asm volatile ("");
        return last;
}

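/* Read the free-running HPET main counter through its fixmap mapping. */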
static notrace cycle_t vread_hpet(void)
{
        return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
}

#ifdef CONFIG_PARAVIRT_CLOCK

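/*
 * Per-CPU pvclock time info structures are packed back to back into
 * the pvclock fixmap pages.  Worked example, assuming 4096-byte pages
 * and the usual 64-byte PVTI_SIZE: 64 entries fit in a page, so cpu 70
 * lands at idx = 70 / 64 = 1 (the second fixmap page) and
 * offset = 70 % 64 = 6.
 */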
static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
        const struct pvclock_vsyscall_time_info *pvti_base;
        int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
        int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

        BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

        pvti_base = (struct pvclock_vsyscall_time_info *)
                    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

        return &pvti_base[offset];
}

static notrace cycle_t vread_pvclock(int *mode)
{
        const struct pvclock_vsyscall_time_info *pvti;
        cycle_t ret;
        u64 last;
        u32 version;
        u8 flags;
        unsigned cpu, cpu1;

        /*
         * Note: the hypervisor must guarantee that:
         * 1. the cpu ID number maps 1:1 to per-CPU pvclock time info;
         * 2. the per-CPU pvclock time info is updated if the
         *    underlying CPU changes;
         * 3. the version is increased whenever the underlying CPU
         *    changes.
         */
        do {
                cpu = __getcpu() & VGETCPU_CPU_MASK;
                /*
                 * TODO: We can put vcpu id into higher bits of
                 * pvti.version.  This will save a couple of cycles by
                 * getting rid of __getcpu() calls (Gleb).
                 */

                pvti = get_pvti(cpu);

                version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

                /*
                 * Test we're still on the cpu as well as the version.
                 * We could have been migrated just after the first
                 * vgetcpu but before fetching the version, so we
                 * wouldn't notice a version change.
                 */
                cpu1 = __getcpu() & VGETCPU_CPU_MASK;
        } while (unlikely(cpu != cpu1 ||
                          (pvti->pvti.version & 1) ||
                          pvti->pvti.version != version));

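        /*
         * If the hypervisor does not mark the TSC as stable, report
         * VCLOCK_NONE so that the callers take the syscall fallback
         * instead of trusting this reading.
         */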
        if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
                *mode = VCLOCK_NONE;

        /* refer to tsc.c read_tsc() comment for rationale */
        last = VVAR(vsyscall_gtod_data).clock.cycle_last;

        if (likely(ret >= last))
                return ret;

        return last;
}
#endif

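/*
 * Syscall fallbacks, used whenever no clocksource is readable from
 * userspace.  Register constraints follow the x86-64 syscall ABI:
 * "0"/rax is the syscall number (and return value), "D"/rdi and
 * "S"/rsi the two arguments; the syscall instruction itself clobbers
 * rcx and r11, hence the clobber list.
 */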
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm("syscall" : "=a" (ret) :
            "0" (__NR_clock_gettime), "D" (clock), "S" (ts)
            : "rcx", "r11", "memory");
        return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
        long ret;

        asm("syscall" : "=a" (ret) :
            "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
            : "rcx", "r11", "memory");
        return ret;
}

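/*
 * Convert the cycle delta since cycle_last into shifted nanoseconds:
 * the return value still has to be shifted right by gtod->clock.shift
 * (the callers do this after adding the base time).  Worked example
 * with made-up numbers: for a 2 GHz TSC and shift == 22, mult would
 * be 0.5 ns/cycle << 22 == 2097152, so (delta * mult) >> 22 == delta / 2.
 */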
notrace static inline u64 vgetsns(int *mode)
{
        long v;
        cycles_t cycles;

        if (gtod->clock.vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();
        else if (gtod->clock.vclock_mode == VCLOCK_HPET)
                cycles = vread_hpet();
#ifdef CONFIG_PARAVIRT_CLOCK
        else if (gtod->clock.vclock_mode == VCLOCK_PVCLOCK)
                cycles = vread_pvclock(mode);
#endif
        else
                return 0;
        v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
        return v * gtod->clock.mult;
}

/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
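/*
 * Readers use the lockless seqcount protocol: sample gtod->seq, read
 * the time fields, and retry if the kernel changed the data in the
 * meantime (read_seqcount_retry() reports a sequence mismatch).
 */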
notrace static int __always_inline do_realtime(struct timespec *ts)
{
        unsigned long seq;
        u64 ns;
        int mode;

        ts->tv_nsec = 0;
        do {
                seq = read_seqcount_begin(&gtod->seq);
                mode = gtod->clock.vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->wall_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->clock.shift;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

        timespec_add_ns(ts, ns);
        return mode;
}

notrace static int do_monotonic(struct timespec *ts)
{
        unsigned long seq;
        u64 ns;
        int mode;

        ts->tv_nsec = 0;
        do {
                seq = read_seqcount_begin(&gtod->seq);
                mode = gtod->clock.vclock_mode;
                ts->tv_sec = gtod->monotonic_time_sec;
                ns = gtod->monotonic_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->clock.shift;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
        timespec_add_ns(ts, ns);

        return mode;
}

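/*
 * The *_COARSE variants return the time as of the last kernel tick:
 * no clocksource read at all, so they are cheaper but only offer
 * tick resolution.
 */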
notrace static int do_realtime_coarse(struct timespec *ts)
{
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&gtod->seq);
                ts->tv_sec = gtod->wall_time_coarse.tv_sec;
                ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
        return 0;
}

notrace static int do_monotonic_coarse(struct timespec *ts)
{
        unsigned long seq;

        do {
                seq = read_seqcount_begin(&gtod->seq);
                ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
                ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

        return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        int ret = VCLOCK_NONE;

        switch (clock) {
        case CLOCK_REALTIME:
                ret = do_realtime(ts);
                break;
        case CLOCK_MONOTONIC:
                ret = do_monotonic(ts);
                break;
        case CLOCK_REALTIME_COARSE:
                return do_realtime_coarse(ts);
        case CLOCK_MONOTONIC_COARSE:
                return do_monotonic_coarse(ts);
        }

        if (ret == VCLOCK_NONE)
                return vdso_fallback_gettime(clock, ts);
        return 0;
}
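
/*
 * Each entry point is also exported under its bare, traditional name
 * as a weak alias, so callers that look up e.g. "clock_gettime" in
 * the vDSO directly still find the implementation.
 */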
int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        long ret = VCLOCK_NONE;

        if (likely(tv != NULL)) {
                BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
                             offsetof(struct timespec, tv_nsec) ||
                             sizeof(*tv) != sizeof(struct timespec));
                ret = do_realtime((struct timespec *)tv);
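                /*
                 * do_realtime() stored nanoseconds in the tv_usec slot
                 * (same offset, as the BUILD_BUG_ON above checks);
                 * scale down to microseconds.
                 */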
                tv->tv_usec /= 1000;
        }
        if (unlikely(tz != NULL)) {
                /* Avoid memcpy. Some old compilers fail to inline it */
                tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
                tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
        }

        if (ret == VCLOCK_NONE)
                return vdso_fallback_gtod(tv, tz);
        return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
        /* This is atomic on x86_64 so we don't need any locks. */
        time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

        if (t)
                *t = result;
        return result;
}
int time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));