linux/arch/sparc/vdso/vclock_gettime.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/unistd.h>
#include <asm/timex.h>
#include <asm/clocksource.h>
#include <asm/vvar.h>

#ifdef  CONFIG_SPARC64
#define SYSCALL_STRING                                                  \
        "ta     0x6d;"                                                  \
        "bcs,a  1f;"                                                    \
        " sub   %%g0, %%o0, %%o0;"                                      \
        "1:"
#else
#define SYSCALL_STRING                                                  \
        "ta     0x10;"                                                  \
        "bcs,a  1f;"                                                    \
        " sub   %%g0, %%o0, %%o0;"                                      \
        "1:"
#endif

#define SYSCALL_CLOBBERS                                                \
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",                 \
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",           \
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",         \
        "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",         \
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",         \
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",         \
        "cc", "memory"

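/*
 * How the fallback expands (annotated 64-bit flavour; illustrative only):
 *
 *      ta      0x6d                    ! trap into the kernel
 *      bcs,a   1f                      ! carry set => kernel reported an error
 *       sub    %g0, %o0, %o0           ! annulled slot, runs only on error:
 *      1:                              !   %o0 = 0 - errno = -errno
 *
 * so callers see the usual Linux negative-errno convention in %o0.
 * The clobber list names every floating-point register plus the
 * condition codes and memory, since the trap is not guaranteed to
 * preserve them.
 */
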
/*
 * Compute the vvar page's address in the process address space, and return it
 * as a pointer to the vvar_data.
 */
notrace static __always_inline struct vvar_data *get_vvar_data(void)
{
        unsigned long ret;

        /*
         * The vvar data page is the first page of the vDSO mapping,
         * immediately before the code, so grab the PC, round it down
         * to the page boundary, and step back one page to reach the
         * data page.
         */
        __asm__("rd %%pc, %0" : "=r" (ret));
        ret &= ~(8192 - 1);
        ret -= 8192;

        return (struct vvar_data *) ret;
}
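
/*
 * Layout assumed above, with 8192 being the 8 KB base page size on
 * sparc64:
 *
 *      [ vvar data page ][ vDSO code page ... ]
 *      ^ returned pointer ^ PC of this code
 *
 * Rounding the PC down to an 8 KB boundary and subtracting one page
 * therefore lands on the vvar_data mapping.
 */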

notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts)
{
        register long num __asm__("g1") = __NR_clock_gettime;
        register long o0 __asm__("o0") = clock;
        register long o1 __asm__("o1") = (long) ts;

        __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
                             "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
        return o0;
}

notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        register long num __asm__("g1") = __NR_gettimeofday;
        register long o0 __asm__("o0") = (long) tv;
        register long o1 __asm__("o1") = (long) tz;

        __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
                             "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
        return o0;
}
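
/*
 * Both fallbacks follow the SPARC syscall convention: the syscall
 * number goes in %g1, the arguments in %o0/%o1, and the result comes
 * back in %o0.  Pinning the locals to those registers lets the
 * compiler set this up without explicit moves around the asm.
 */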

#ifdef  CONFIG_SPARC64
notrace static __always_inline u64 vread_tick(void)
{
        u64     ret;

        __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
        return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
        u64     ret;

        __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
        return ret;
}
#else
notrace static __always_inline u64 vread_tick(void)
{
        register unsigned long long ret asm("o4");

        __asm__ __volatile__("rd %%tick, %L0\n\t"
                             "srlx %L0, 32, %H0"
                             : "=r" (ret));
        return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
        register unsigned long long ret asm("o4");

        __asm__ __volatile__("rd %%asr24, %L0\n\t"
                             "srlx %L0, 32, %H0"
                             : "=r" (ret));
        return ret;
}
#endif
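
/*
 * In the 32-bit case the counter is a 64-bit value held in a register
 * pair: rd deposits all 64 bits into the hardware register backing
 * the low half (%L0), and the srlx copies the upper 32 bits into the
 * high half (%H0).  The asm("o4") pins the pair to %o4/%o5,
 * presumably to keep it in known caller-saved registers.
 */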

notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
        u64 v;
        u64 cycles;

        cycles = vread_tick();
        v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
        return v * vvar->clock.mult;
}

notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
{
        u64 v;
        u64 cycles;

        cycles = vread_tick_stick();
        v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
        return v * vvar->clock.mult;
}
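
/*
 * This is the standard clocksource delta-to-ns conversion; together
 * with the shift applied by the callers it computes
 *
 *      ns = ((cycles - cycle_last) & mask) * mult >> shift
 *
 * Worked example with made-up numbers: for a hypothetical 1 GHz
 * counter the kernel could pick shift = 24 and mult = 1 << 24, so a
 * delta of 1000 cycles gives (1000 << 24) >> 24 = 1000 ns.
 */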

notrace static __always_inline int do_realtime(struct vvar_data *vvar,
                                               struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns(vvar);
                ns >>= vvar->clock.shift;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
                                                     struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns_stick(vvar);
                ns >>= vvar->clock.shift;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
                                                struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns(vvar);
                ns >>= vvar->clock.shift;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
                                                      struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns_stick(vvar);
                ns >>= vvar->clock.shift;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}
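
/*
 * The do_* loops above are the reader side of a sequence counter:
 * vvar_read_begin() waits until no update is in progress, and
 * vvar_read_retry() reports whether the writer touched the data while
 * it was being read, in which case the snapshot is thrown away and
 * the loop runs again.
 */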

notrace static int do_realtime_coarse(struct vvar_data *vvar,
                                      struct __kernel_old_timespec *ts)
{
        unsigned long seq;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_coarse_sec;
                ts->tv_nsec = vvar->wall_time_coarse_nsec;
        } while (unlikely(vvar_read_retry(vvar, seq)));
        return 0;
}

notrace static int do_monotonic_coarse(struct vvar_data *vvar,
                                       struct __kernel_old_timespec *ts)
{
        unsigned long seq;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_coarse_sec;
                ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        return 0;
}
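
/*
 * The coarse variants never read the cycle counter; they only copy
 * the kernel's last snapshot, which is why the callers below can use
 * them even when vclock_mode is VCLOCK_NONE.
 */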

notrace int
__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
        struct vvar_data *vvd = get_vvar_data();

        switch (clock) {
        case CLOCK_REALTIME:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_realtime(vvd, ts);
        case CLOCK_MONOTONIC:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_monotonic(vvd, ts);
        case CLOCK_REALTIME_COARSE:
                return do_realtime_coarse(vvd, ts);
        case CLOCK_MONOTONIC_COARSE:
                return do_monotonic_coarse(vvd, ts);
        }
        /*
         * Unknown clock ID? Fall back to the syscall.
         */
        return vdso_fallback_gettime(clock, ts);
}
int
clock_gettime(clockid_t, struct __kernel_old_timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));

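/*
 * Illustrative use from userspace (not part of this file): a caller
 * bypassing libc could locate the vDSO via the AT_SYSINFO_EHDR auxv
 * entry and bind to the symbol directly; vdso_sym() here is a
 * hypothetical lookup helper:
 *
 *      int (*cgt)(clockid_t, struct __kernel_old_timespec *);
 *      struct __kernel_old_timespec ts;
 *
 *      cgt = vdso_sym("__vdso_clock_gettime");
 *      if (cgt && cgt(CLOCK_MONOTONIC, &ts) == 0)
 *              consume(&ts);           (consume() is also hypothetical)
 */
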
notrace int
__vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts)
{
        struct vvar_data *vvd = get_vvar_data();

        switch (clock) {
        case CLOCK_REALTIME:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_realtime_stick(vvd, ts);
        case CLOCK_MONOTONIC:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_monotonic_stick(vvd, ts);
        case CLOCK_REALTIME_COARSE:
                return do_realtime_coarse(vvd, ts);
        case CLOCK_MONOTONIC_COARSE:
                return do_monotonic_coarse(vvd, ts);
        }
        /*
         * Unknown clock ID? Fall back to the syscall.
         */
        return vdso_fallback_gettime(clock, ts);
}

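/*
 * The *_stick entry points mirror the %tick-based ones exactly,
 * differing only in reading the system tick register (STICK, %asr24)
 * instead of the per-CPU %tick; presumably the kernel exposes
 * whichever set matches the clocksource it selected at boot.
 */
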
notrace int
__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        struct vvar_data *vvd = get_vvar_data();

        if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
                if (likely(tv != NULL)) {
                        union tstv_t {
                                struct __kernel_old_timespec ts;
                                struct __kernel_old_timeval tv;
                        } *tstv = (union tstv_t *) tv;
                        do_realtime(vvd, &tstv->ts);
                        /*
                         * Assign before dividing to ensure that the division is
                         * done in the type of tv_usec, not tv_nsec.
                         *
                         * There cannot be > 1 billion nsec in a second:
                         * do_realtime() has already distributed such overflow
                         * into tv_sec.  So we can assign it to an int safely.
                         */
                        tstv->tv.tv_usec = tstv->ts.tv_nsec;
                        tstv->tv.tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = vvd->tz_minuteswest;
                        tz->tz_dsttime = vvd->tz_dsttime;
                }
                return 0;
        }
        return vdso_fallback_gettimeofday(tv, tz);
}
int
gettimeofday(struct __kernel_old_timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));

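/*
 * The union tstv_t trick above works because tv_sec is the first
 * member of both structures, so do_realtime() fills it in place, and
 * tv_nsec occupies the same storage as tv_usec, letting the
 * nanosecond value be divided down to microseconds without a
 * temporary timespec.
 */
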
notrace int
__vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        struct vvar_data *vvd = get_vvar_data();

        if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
                if (likely(tv != NULL)) {
                        union tstv_t {
                                struct __kernel_old_timespec ts;
                                struct __kernel_old_timeval tv;
                        } *tstv = (union tstv_t *) tv;
                        do_realtime_stick(vvd, &tstv->ts);
                        /*
                         * Assign before dividing to ensure that the division is
                         * done in the type of tv_usec, not tv_nsec.
                         *
                         * There cannot be > 1 billion nsec in a second:
                         * do_realtime_stick() has already distributed such
                         * overflow into tv_sec.  So we can assign it to an int
                         * safely.
                         */
                        tstv->tv.tv_usec = tstv->ts.tv_nsec;
                        tstv->tv.tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = vvd->tz_minuteswest;
                        tz->tz_dsttime = vvd->tz_dsttime;
                }
                return 0;
        }
        return vdso_fallback_gettimeofday(tv, tz);
}