linux/arch/arm64/kernel/vdso/gettimeofday.S
/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#define NSEC_PER_SEC_LO16       0xca00
#define NSEC_PER_SEC_HI16       0x3b9a

vdso_data       .req    x6
seqcnt          .req    w7
w_tmp           .req    w8
x_tmp           .req    x8

/*
 * Conventions for macro arguments:
 * - An argument is write-only if its name starts with "res".
 * - All other arguments are read-only, unless otherwise specified.
 */

        .macro  seqcnt_acquire
9999:   ldr     seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
        tbnz    seqcnt, #0, 9999b
        dmb     ishld
        .endm

        .macro  seqcnt_check fail
        dmb     ishld
        ldr     w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
        cmp     w_tmp, seqcnt
        b.ne    \fail
        .endm
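
/*
 * Note: seqcnt_acquire/seqcnt_check implement the reader side of a seqlock
 * over the vdso_data page. A rough C sketch of the pattern (helper and field
 * names here are illustrative, not necessarily the kernel's exact ones):
 *
 *      do {
 *              seq = READ_ONCE(data->tb_seq_count);
 *      } while (seq & 1);                      // odd: update in progress
 *      smp_rmb();                              // the dmb ishld above
 *      ... copy the vdso_data fields needed ...
 *      smp_rmb();                              // the dmb ishld in seqcnt_check
 *      if (READ_ONCE(data->tb_seq_count) != seq)
 *              goto retry;                     // writer raced with us, retry
 */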

        .macro  syscall_check fail
        ldr     w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
        cbnz    w_tmp, \fail
        .endm
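
/*
 * The word at VDSO_USE_SYSCALL is set by the kernel when the current
 * clocksource cannot be read from userspace; the cycle-counter-based paths
 * below check it and fall back to the real system call in that case.
 */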

        .macro get_nsec_per_sec res
        mov     \res, #NSEC_PER_SEC_LO16
        movk    \res, #NSEC_PER_SEC_HI16, lsl #16
        .endm
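
/*
 * The mov/movk pair composes NSEC_PER_SEC without a literal pool load:
 * (0x3b9a << 16) | 0xca00 = 0x3b9aca00 = 1,000,000,000.
 */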

        /*
         * Returns the clock delta, in nanoseconds left-shifted by the clock
         * shift.
         */
        .macro  get_clock_shifted_nsec res, cycle_last, mult
        /* Read the virtual counter. */
        isb
        mrs     x_tmp, cntvct_el0
        /* Calculate cycle delta and convert to ns. */
        sub     \res, x_tmp, \cycle_last
        /* We can only guarantee 56 bits of precision. */
        movn    x_tmp, #0xff00, lsl #48
        and     \res, x_tmp, \res
        mul     \res, \res, \mult
        .endm
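
        /*
         * Roughly equivalent C for the macro above (a sketch):
         *
         *      cycles = cntvct_el0;                    // isb; mrs
         *      delta  = (cycles - cycle_last) & ((1ULL << 56) - 1);
         *      res    = delta * mult;                  // ns << cs_shift
         */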

        /*
         * Returns in res_{sec,nsec} the REALTIME timespec, based on the
         * "wall time" (xtime) and the clock_mono delta.
         */
        .macro  get_ts_realtime res_sec, res_nsec, \
                        clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
        add     \res_nsec, \clock_nsec, \xtime_nsec
        udiv    x_tmp, \res_nsec, \nsec_to_sec
        add     \res_sec, \xtime_sec, x_tmp
        msub    \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
        .endm
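
        /*
         * In C terms (a sketch; all nanosecond values here are left-shifted
         * by the clock shift):
         *
         *      nsec = clock_nsec + xtime_nsec;
         *      sec  = xtime_sec + nsec / nsec_to_sec;
         *      nsec = nsec % nsec_to_sec;              // udiv + msub
         */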

        /*
         * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
         * used for CLOCK_MONOTONIC_RAW.
         */
        .macro  get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
        udiv    \res_sec, \clock_nsec, \nsec_to_sec
        msub    \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
        .endm
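
        /*
         * In C terms (a sketch, same shifted-nanosecond convention as above):
         *
         *      sec  = clock_nsec / nsec_to_sec;
         *      nsec = clock_nsec % nsec_to_sec;        // udiv + msub
         */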

        /* sec and nsec are modified in place. */
        .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
        /* Add timespec. */
        add     \sec, \sec, \ts_sec
        add     \nsec, \nsec, \ts_nsec

        /* Normalise the new timespec. */
        cmp     \nsec, \nsec_to_sec
        b.lt    9999f
        sub     \nsec, \nsec, \nsec_to_sec
        add     \sec, \sec, #1
9999:
        cmp     \nsec, #0
        b.ge    9998f
        add     \nsec, \nsec, \nsec_to_sec
        sub     \sec, \sec, #1
9998:
        .endm
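
        /*
         * Equivalent normalisation in C (a sketch; note the macro performs at
         * most one carry or one borrow):
         *
         *      sec  += ts_sec;
         *      nsec += ts_nsec;
         *      if (nsec >= nsec_to_sec) { nsec -= nsec_to_sec; sec += 1; }
         *      if (nsec < 0)            { nsec += nsec_to_sec; sec -= 1; }
         */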

        .macro clock_gettime_return, shift=0
        .if \shift == 1
        lsr     x11, x11, x12
        .endif
        stp     x10, x11, [x1, #TSPEC_TV_SEC]
        mov     x0, xzr
        ret
        .endm
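
        /*
         * clock_gettime_return expects x10/x11 = tv_sec/tv_nsec (tv_nsec
         * still left-shifted by the clock shift held in x12 when shift=1)
         * and x1 = pointer to the caller's timespec; it stores both fields
         * and returns 0 in x0.
         */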

        .macro jump_slot jumptable, index, label
        .if (. - \jumptable) != 4 * (\index)
        .error "Jump slot index mismatch"
        .endif
        b       \label
        .endm
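
        /*
         * Each jump slot is a single 4-byte branch; the .if/.error check
         * turns a misordered jump table into a build failure instead of a
         * silent branch to the wrong clock handler.
         */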

        .text

/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__kernel_gettimeofday)
        .cfi_startproc
        adr     vdso_data, _vdso_data
        /* If tv is NULL, skip to the timezone code. */
        cbz     x0, 2f

        /* Compute the time of day. */
1:      seqcnt_acquire
        syscall_check fail=4f
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
        /* w11 = cs_mono_mult, w12 = cs_shift */
        ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        seqcnt_check fail=1b

        get_nsec_per_sec res=x9
        lsl     x9, x9, x12

        get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
        get_ts_realtime res_sec=x10, res_nsec=x11, \
                clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

        /* Convert ns to us. */
        mov     x13, #1000
        lsl     x13, x13, x12
        udiv    x11, x11, x13
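        /*
         * x11 held nanoseconds << cs_shift, so dividing by (1000 << cs_shift)
         * yields plain microseconds.
         */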
        stp     x10, x11, [x0, #TVAL_TV_SEC]
2:
        /* If tz is NULL, return 0. */
        cbz     x1, 3f
        ldp     w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
        stp     w4, w5, [x1, #TZ_MINWEST]
3:
        mov     x0, xzr
        ret
4:
        /* Syscall fallback. */
        mov     x8, #__NR_gettimeofday
        svc     #0
        ret
        .cfi_endproc
ENDPROC(__kernel_gettimeofday)

#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE

/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__kernel_clock_gettime)
        .cfi_startproc
        cmp     w0, #JUMPSLOT_MAX
        b.hi    syscall
        adr     vdso_data, _vdso_data
        adr     x_tmp, jumptable
        add     x_tmp, x_tmp, w0, uxtw #2
        br      x_tmp
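        /*
         * Computed dispatch: each jump table entry is a single 4-byte branch,
         * so jumptable + (clock_id << 2) above lands on the slot for clock_id.
         */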

        ALIGN
jumptable:
        jump_slot jumptable, CLOCK_REALTIME, realtime
        jump_slot jumptable, CLOCK_MONOTONIC, monotonic
        b       syscall
        b       syscall
        jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
        jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
        jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
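        /*
         * The two bare "b syscall" slots are clock ids 2 and 3 (presumably
         * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID), which the
         * vDSO cannot service, so they always take the syscall fallback.
         */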

        .if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
        .error  "Wrong jumptable size"
        .endif

        ALIGN
realtime:
        seqcnt_acquire
        syscall_check fail=syscall
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
        /* w11 = cs_mono_mult, w12 = cs_shift */
        ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        seqcnt_check fail=realtime

        /* All computations are done with left-shifted nsecs. */
        get_nsec_per_sec res=x9
        lsl     x9, x9, x12

        get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
        get_ts_realtime res_sec=x10, res_nsec=x11, \
                clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
        clock_gettime_return, shift=1

        ALIGN
monotonic:
        seqcnt_acquire
        syscall_check fail=syscall
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
        /* w11 = cs_mono_mult, w12 = cs_shift */
        ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        ldp     x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
        seqcnt_check fail=monotonic

        /* All computations are done with left-shifted nsecs. */
        lsl     x4, x4, x12
        get_nsec_per_sec res=x9
        lsl     x9, x9, x12

        get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
        get_ts_realtime res_sec=x10, res_nsec=x11, \
                clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

        add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
        clock_gettime_return, shift=1

        ALIGN
monotonic_raw:
        seqcnt_acquire
        syscall_check fail=syscall
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
        /* w11 = cs_raw_mult, w12 = cs_shift */
        ldp     w12, w11, [vdso_data, #VDSO_CS_SHIFT]
        ldp     x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
        seqcnt_check fail=monotonic_raw

        /* All computations are done with left-shifted nsecs. */
        get_nsec_per_sec res=x9
        lsl     x9, x9, x12

        get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
        get_ts_clock_raw res_sec=x10, res_nsec=x11, \
                clock_nsec=x15, nsec_to_sec=x9

        add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
        clock_gettime_return, shift=1

        ALIGN
realtime_coarse:
        seqcnt_acquire
        ldp     x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
        seqcnt_check fail=realtime_coarse
        clock_gettime_return

        ALIGN
monotonic_coarse:
        seqcnt_acquire
        ldp     x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
        ldp     x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
        seqcnt_check fail=monotonic_coarse

        /* Computations are done in (non-shifted) nsecs. */
        get_nsec_per_sec res=x9
        add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
        clock_gettime_return

        ALIGN
syscall: /* Syscall fallback. */
        mov     x8, #__NR_clock_gettime
        svc     #0
        ret
        .cfi_endproc
ENDPROC(__kernel_clock_gettime)

/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
ENTRY(__kernel_clock_getres)
        .cfi_startproc
        cmp     w0, #CLOCK_REALTIME
        ccmp    w0, #CLOCK_MONOTONIC, #0x4, ne
        ccmp    w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
        b.ne    1f
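        /*
         * The cmp/ccmp chain above acts as a branchless OR: each ccmp only
         * performs its comparison when the previous one did not match (ne),
         * otherwise it forces NZCV to 0x4 (Z set, i.e. "equal"). The b.ne is
         * therefore taken only if clock_id is none of CLOCK_REALTIME,
         * CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW.
         */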

        ldr     x2, 5f
        b       2f
1:
        cmp     w0, #CLOCK_REALTIME_COARSE
        ccmp    w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
        b.ne    4f
        ldr     x2, 6f
2:
        cbz     x1, 3f
        stp     xzr, x2, [x1]

3:      /* res == NULL. */
        mov     w0, wzr
        ret

4:      /* Syscall fallback. */
        mov     x8, #__NR_clock_getres
        svc     #0
        ret
5:
        .quad   CLOCK_REALTIME_RES
6:
        .quad   CLOCK_COARSE_RES
        .cfi_endproc
ENDPROC(__kernel_clock_getres)