linux/kernel/time/vsyscall.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 ARM Ltd.
 *
 * Generic implementation of update_vsyscall and update_vsyscall_tz.
 *
 * Based on the x86 specific implementation.
 */

#include <linux/hrtimer.h>
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include "timekeeping_internal.h"

static inline void update_vdso_data(struct vdso_data *vdata,
                                    struct timekeeper *tk)
{
        struct vdso_timestamp *vdso_ts;
        u64 nsec, sec;

        vdata[CS_HRES_COARSE].cycle_last        = tk->tkr_mono.cycle_last;
        vdata[CS_HRES_COARSE].mask              = tk->tkr_mono.mask;
        vdata[CS_HRES_COARSE].mult              = tk->tkr_mono.mult;
        vdata[CS_HRES_COARSE].shift             = tk->tkr_mono.shift;
        vdata[CS_RAW].cycle_last                = tk->tkr_raw.cycle_last;
        vdata[CS_RAW].mask                      = tk->tkr_raw.mask;
        vdata[CS_RAW].mult                      = tk->tkr_raw.mult;
        vdata[CS_RAW].shift                     = tk->tkr_raw.shift;

        /* CLOCK_MONOTONIC */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;

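        /*
         * tkr_mono.xtime_nsec is kept left-shifted by tkr_mono.shift, so
         * wall_to_monotonic.tv_nsec has to be shifted into the same
         * fixed-point domain before the sum can be normalized against
         * one shifted second (NSEC_PER_SEC << shift). The loop runs at
         * most a couple of times because tv_nsec is always smaller than
         * NSEC_PER_SEC.
         */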
        nsec = tk->tkr_mono.xtime_nsec;
        nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
        while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
                vdso_ts->sec++;
        }
        vdso_ts->nsec   = nsec;

        /* Copy MONOTONIC time for BOOTTIME */
        sec     = vdso_ts->sec;
        /* Add the boot offset */
        sec     += tk->monotonic_to_boot.tv_sec;
        nsec    += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;

        /* CLOCK_BOOTTIME */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME];
        vdso_ts->sec    = sec;
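        /*
         * nsec still holds the normalized MONOTONIC remainder plus the
         * shifted boot offset, so it can exceed one shifted second again
         * and is normalized a second time here.
         */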
        while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
                nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
                vdso_ts->sec++;
        }
        vdso_ts->nsec   = nsec;

        /* CLOCK_MONOTONIC_RAW */
        vdso_ts         = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW];
        vdso_ts->sec    = tk->raw_sec;
        vdso_ts->nsec   = tk->tkr_raw.xtime_nsec;

        /*
         * CLOCK_TAI: shares the CLOCK_REALTIME nanoseconds; only the
         * second count differs, by tk->tai_offset.
         */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
        vdso_ts->sec    = tk->xtime_sec + (s64)tk->tai_offset;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;
}

void update_vsyscall(struct timekeeper *tk)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();
        struct vdso_timestamp *vdso_ts;
        s32 clock_mode;
        u64 nsec;

        /* copy vsyscall data */
        vdso_write_begin(vdata);

        clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
        vdata[CS_HRES_COARSE].clock_mode        = clock_mode;
        vdata[CS_RAW].clock_mode                = clock_mode;

        /* CLOCK_REALTIME also required for time() */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
        vdso_ts->sec    = tk->xtime_sec;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec;

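        /*
         * The coarse clocks are read by the vDSO without touching the
         * clocksource, so their nanoseconds are stored in plain,
         * unshifted form.
         */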
        /* CLOCK_REALTIME_COARSE */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
        vdso_ts->sec    = tk->xtime_sec;
        vdso_ts->nsec   = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;

        /* CLOCK_MONOTONIC_COARSE */
        vdso_ts         = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
        vdso_ts->sec    = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
        nsec            = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
        nsec            = nsec + tk->wall_to_monotonic.tv_nsec;
        vdso_ts->sec    += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);

        /*
         * hrtimer_res is read by clock_getres() without the seqlock
         * held, hence the WRITE_ONCE(). There is no need for a second
         * copy in vdata[CS_RAW].
         */
        WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);

        /*
         * If the current clocksource is not VDSO capable, then spare the
         * update of the high resolution parts.
         */
        if (clock_mode != VDSO_CLOCKMODE_NONE)
                update_vdso_data(vdata, tk);

        __arch_update_vsyscall(vdata, tk);

        vdso_write_end(vdata);

        __arch_sync_vdso_data(vdata);
}

void update_vsyscall_tz(void)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();

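        /* Mirror the timezone data set via settimeofday(2) */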
        vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest;
        vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime;

        __arch_sync_vdso_data(vdata);
}

/**
 * vdso_update_begin - Start of a VDSO update section
 *
 * Allows architecture code to safely update the architecture specific VDSO
 * data. Disables interrupts, acquires timekeeper lock to serialize against
 * concurrent updates from timekeeping and invalidates the VDSO data
 * sequence counter to prevent concurrent readers from accessing
 * inconsistent data.
 *
 * Returns: Saved interrupt flags which need to be handed in to
 * vdso_update_end().
 */
unsigned long vdso_update_begin(void)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();
        unsigned long flags;

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        vdso_write_begin(vdata);
        return flags;
}

/**
 * vdso_update_end - End of a VDSO update section
 * @flags:      Interrupt flags as returned from vdso_update_begin()
 *
 * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
 * synchronization if the architecture requires it, drops timekeeper lock
 * and restores interrupt flags.
 */
void vdso_update_end(unsigned long flags)
{
        struct vdso_data *vdata = __arch_get_k_vdso_data();

        vdso_write_end(vdata);
        __arch_sync_vdso_data(vdata);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
}
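
/*
 * Usage sketch (not taken from any in-tree architecture): arch code that
 * has to update its architecture specific vdso data would bracket the
 * write with the two helpers above. arch_fixup_vdso_data() is a
 * hypothetical hook used here only for illustration.
 *
 *      unsigned long flags = vdso_update_begin();
 *
 *      arch_fixup_vdso_data(__arch_get_k_vdso_data());
 *      vdso_update_end(flags);
 */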