/* linux/arch/powerpc/kernel/vdso32/gettimeofday.S */
/*
 * Userland implementation of gettimeofday() for 32 bits processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  13#include <asm/processor.h>
  14#include <asm/ppc_asm.h>
  15#include <asm/vdso.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/unistd.h>
  18
  19/* Offset for the low 32-bit part of a field of long type */
  20#ifdef CONFIG_PPC64
  21#define LOPART  4
  22#define TSPEC_TV_SEC    TSPC64_TV_SEC+LOPART
  23#else
  24#define LOPART  0
  25#define TSPEC_TV_SEC    TSPC32_TV_SEC
  26#endif
  27
  28        .text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 * In:   r3 = tv (may be NULL), r4 = tz (may be NULL)
 * Out:  r3 = 0 and cr0.so cleared, which is the vDSO convention for
 *       "syscall succeeded" to the C library.
 * Both pointer arguments are optional: a NULL tv skips the time
 * computation entirely, a NULL tz skips the timezone copy.
 */
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12			/* save return addr; bl below clobbers lr */
  .cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9, r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f			/* no tv -> skip the time computation */
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel;
					 * returns sec in r3, usec in r4,
					 * keeps r9, clobbers r0,r5,r6,cr0 */
	stw	r3,TVAL32_TV_SEC(r10)	/* tv->tv_sec */
	stw	r4,TVAL32_TV_USEC(r10)	/* tv->tv_usec */

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)/* fill tz from the datapage copy */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12			/* restore caller's return address */
	crclr	cr0*4+so		/* clear summary-overflow = success */
	li	r3,0			/* return 0 */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)
  65
/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 * Only CLOCK_REALTIME and CLOCK_MONOTONIC are handled in userland; any
 * other clock id takes the syscall fallback at 99: below.
 * Note: the cr1 comparison against CLOCK_MONOTONIC made on entry is
 * still valid after __do_get_tspec, which clobbers only cr0.
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported clock -> real syscall
					 * (lr is still intact here) */

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel;
					 * also leaves the update count in r8 */
	bne	cr1,80f			/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* now we must fixup using wall to monotonic. We need to snapshot
	 * that value and do the counter trick again. Fortunately, we still
	 * have the counter value in r8 that was returned by __do_get_xsec.
	 * At this point, r3,r4 contain our sec/nsec values, r5 and r6
	 * can be used, r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,WTOM_CLOCK_SEC(r9)	/* wall-to-monotonic offset */
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter
	 */
	or	r0,r6,r5		/* r0 depends on both loads... */
	xor	r0,r0,r0		/* ...and is then forced to zero */
	add	r9,r9,r0		/* r9 unchanged, but the counter re-read
					 * below is now ordered after the loads */
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmpl	cr0,r8,r0		/* check if updated */
	bne-	50b			/* kernel updated meanwhile -> redo all */

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all ?
	 */
	add	r3,r3,r5		/* sec += wtom sec */
	add	r4,r4,r6		/* nsec += wtom nsec */
	cmpw	cr0,r4,r7		/* nsec >= NSEC_PER_SEC ? */
	cmpwi	cr1,r4,0		/* nsec < 0 ? */
	blt	1f
	subf	r4,r7,r4		/* carry excess nsec into seconds */
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1		/* borrow a second for negative nsec */
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)	/* tp->tv_sec */
	stw	r4,TSPC32_TV_NSEC(r11)	/* tp->tv_nsec */

	mtlr	r12			/* restore caller's return address */
	crclr	cr0*4+so		/* success indication */
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
	sc				/* kernel sets r3 / cr0.so itself */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)
 146
 147
/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 * Reports the fixed resolution CLOCK_REALTIME_RES (in nanoseconds) for
 * CLOCK_REALTIME and CLOCK_MONOTONIC; other ids use the real syscall.
 * res may be NULL, in which case only the 0 return value is produced.
 */
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f			/* unsupported clock -> syscall */

	li	r3,0			/* return value: 0 (success) */
	cmpli	cr0,r4,0		/* NULL res pointer ? */
	crclr	cr0*4+so		/* success indication (before beqlr,
					 * so the NULL case gets it too) */
	beqlr				/* res == NULL: just return 0 */
	lis	r5,CLOCK_REALTIME_RES@h
	ori	r5,r5,CLOCK_REALTIME_RES@l
	stw	r3,TSPC32_TV_SEC(r4)	/* res->tv_sec = 0 */
	stw	r5,TSPC32_TV_NSEC(r4)	/* res->tv_nsec = CLOCK_REALTIME_RES */
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc				/* kernel sets r3 / cr0.so itself */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
 181
 182
/*
 * Exact prototype of time()
 *
 * time_t time(time *t);
 *
 * Returns the current seconds count read straight from the kernel's
 * xtime stamp in the datapage (second granularity only — no timebase
 * interpolation is done here).  If t is non-NULL the value is also
 * stored at *t.
 */
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12			/* save return addr; bl clobbers lr */
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	bl	__get_datapage@local
	mr	r9, r3			/* datapage ptr in r9 */

	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)	/* low word of xtime.tv_sec */

	cmplwi	r11,0			/* check if t is NULL */
	beq	2f
	stw	r3,0(r11)		/* store result at *t */
2:	mtlr	r12
	crclr	cr0*4+so		/* success indication */
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)
 208
/*
 * This is the core of clock_gettime() and gettimeofday(),
 * it returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 *
 * Structure: a lockless seqcount-style reader.  It samples the
 * datapage update counter into r8, reads the timebase and conversion
 * data, then re-reads the counter and retries from the top if the
 * kernel updated the page in between.  The xor/add "fake dependency"
 * sequences order the loads without needing a sync instruction.
 */
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency: r0 = 0 but
					 * data-dependent on the counter */
	add	r9,r9,r0		/* r9 unchanged; loads below are now
					 * ordered after the counter read */

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value: re-read the upper half until it is
	 * unchanged across the lower-half read (guards against a
	 * low-word wrap between the two reads)
	 */
2:	mfspr	r3, SPRN_TBRU
	mfspr	r4, SPRN_TBRL
	mfspr	r0, SPRN_TBRU
	cmplw	cr0,r3,r0
	bne-	2b

	/* Subtract tb orig stamp and shift left 12 bits.
	 */
	subfc	r4,r6,r4		/* 64-bit subtract: low half, sets CA */
	subfe	r0,r5,r3		/* high half, consumes the borrow */
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31		/* merge top 12 bits of low word into
					 * high word; the '.' sets cr0 for the
					 * "high part zero?" test below */
	slwi	r4,r4,12

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6		/* add fractional parts, carry... */
	adde	r3,r3,r5		/* ...into the seconds */

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter
	 */
	or	r6,r4,r3		/* r6 depends on the result... */
	xor	r0,r6,r6		/* ...r0 = 0, still dependent */
	add	r9,r9,r0		/* orders the counter re-read */
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b			/* data changed under us -> retry */

	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
  .cfi_endproc
 293