linux/kernel/time/vsyscall.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 ARM Ltd.
 *
 * Generic implementation of update_vsyscall and update_vsyscall_tz.
 *
 * Based on the x86 specific implementation.
 */

#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

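/*
 * The generic vDSO data consists of two entries: vdata[CS_HRES_COARSE]
 * backs CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI and the
 * coarse clocks, while vdata[CS_RAW] backs CLOCK_MONOTONIC_RAW.
 * update_vdso_data() fills in the clocksource parameters and the base
 * timestamps for the high resolution clocks.
 */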
static inline void update_vdso_data(struct vdso_data *vdata,
                                    struct timekeeper *tk)
{
        struct vdso_timestamp *vdso_ts;
        u64 nsec, sec;

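        /*
         * Clocksource parameters for the vDSO fast path. A reader computes
         * the time roughly as:
         *
         *   cycles = read clocksource counter
         *   delta  = (cycles - cycle_last) & mask
         *   nsec   = (basetime[clk].nsec + delta * mult) >> shift
         *   sec    = basetime[clk].sec
         *
         * which is why the nsec values stored below are kept left-shifted
         * by 'shift'.
         */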
        vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
        vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
        vdata[CS_HRES_COARSE].mult              = tk->tkr_mono.mult;
        vdata[CS_HRES_COARSE].shift             = tk->tkr_mono.shift;
        vdata[CS_RAW].cycle_last                = tk->tkr_raw.cycle_last;
        vdata[CS_RAW].mask                      = tk->tkr_raw.mask;
        vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
        vdata[CS_RAW].shift                     = tk->tkr_raw.shift;

        /* CLOCK_MONOTONIC */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;

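        /*
         * tkr_mono.xtime_nsec is stored shifted left by tkr_mono.shift, so
         * the wall_to_monotonic nanoseconds are shifted up to match before
         * the sum is normalized into the [0, NSEC_PER_SEC << shift) range.
         */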
        nsec = tk->tkr_mono.xtime_nsec;
        nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
        while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
                vdso_ts->sec++;
        }
        vdso_ts->nsec   = nsec;

        /* Copy MONOTONIC time for BOOTTIME */
        sec     = vdso_ts->sec;
        /* Add the boot offset */
        sec     += tk->monotonic_to_boot.tv_sec;
        nsec    += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;

        /* CLOCK_BOOTTIME */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
        vdso_ts->sec    = sec;

        while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
                vdso_ts->sec++;
        }
        vdso_ts->nsec   = nsec;

        /* CLOCK_MONOTONIC_RAW */
        vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
        vdso_ts->sec    = tk->raw_sec;
        vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;

        /* CLOCK_TAI */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
        vdso_ts->sec    = tk->xtime_sec + (s64)tk->tai_offset;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
}

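/*
 * update_vsyscall() is invoked by the timekeeping core whenever the
 * timekeeper is updated. It copies the relevant timekeeper state into the
 * vDSO data pages under the vDSO sequence count, so userspace readers
 * either see a consistent snapshot or retry.
 */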
void update_vsyscall(struct timekeeper *tk)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();
        struct vdso_timestamp *vdso_ts;
        s32 clock_mode;
        u64 nsec;

        /* copy vsyscall data */
        vdso_write_begin(vdata);

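        /*
         * The clock mode tells the vDSO whether and how the clocksource can
         * be read from userspace; VDSO_CLOCKMODE_NONE makes the vDSO fall
         * back to the clock_gettime() syscall for the high resolution
         * clocks.
         */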
        clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
        vdata[CS_HRES_COARSE].clock_mode        = clock_mode;
        vdata[CS_RAW].clock_mode                = clock_mode;

        /* CLOCK_REALTIME also required for time() */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
        vdso_ts->sec    = tk->xtime_sec;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;

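        /*
         * The coarse clocks are served straight from this data without
         * reading the clocksource, so their nsec values are stored as plain
         * nanoseconds (already shifted down).
         */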
        /* CLOCK_REALTIME_COARSE */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
        vdso_ts->sec    = tk->xtime_sec;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

        /* CLOCK_MONOTONIC_COARSE */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
        nsec            = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
        nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
        vdso_ts->sec    += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);

        /*
         * hrtimer_res is read by clock_getres() without holding the vDSO
         * sequence count, hence the WRITE_ONCE(). No second copy is needed.
         */
        WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);

        /*
         * If the current clocksource is not VDSO capable, skip the update
         * of the high resolution parts.
         */
        if (clock_mode != VDSO_CLOCKMODE_NONE)
                update_vdso_data(vdata, tk);

        __arch_update_vsyscall(vdata, tk);

        vdso_write_end(vdata);

        __arch_sync_vdso_data(vdata);
}

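/*
 * Mirror the kernel timezone (sys_tz) into the vDSO data so the vDSO
 * implementation of gettimeofday() can return it without a syscall. Invoked
 * when the timezone is changed via settimeofday().
 */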
void update_vsyscall_tz(void)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();

        vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
        vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;

        __arch_sync_vdso_data(vdata);
}