linux/kernel/time/sched_clock.c
/*
 * sched_clock.c: Generic sched_clock() support, to extend low level
 *                hardware time counters to full 64-bit ns values.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:           sched_clock() value at last update.
 * @epoch_cyc:          Clock cycle value at last update.
 * @sched_clock_mask:   Bitmask for two's complement subtraction of non-64-bit
 *                      clocks.
 * @read_sched_clock:   Current clock source (or dummy source when suspended).
 * @mult:               Multiplier for scaled math conversion.
 * @shift:              Shift value for scaled math conversion.
 *
 * Care must be taken when updating this structure; it is read by
 * some very hot code paths. It occupies <=40 bytes and, when combined
 * with the seqcount used to synchronize access, comfortably fits into
 * a 64 byte cache line.
 */
struct clock_read_data {
        u64 epoch_ns;
        u64 epoch_cyc;
        u64 sched_clock_mask;
        u64 (*read_sched_clock)(void);
        u32 mult;
        u32 shift;
};
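
/*
 * Size check (illustrative, assuming a 64-bit kernel): the layout above
 * is 3 * 8 (u64) + 8 (function pointer) + 2 * 4 (u32) = 40 bytes, which
 * is where the "<=40 bytes" figure in the kerneldoc comes from; 32-bit
 * kernels with 4-byte pointers come in smaller still.
 */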

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:                Sequence counter for protecting updates. The lowest
 *                      bit is the index for @read_data.
 * @read_data:          Data required to read from sched_clock().
 * @wrap_kt:            Duration for which clock can run before wrapping.
 * @rate:               Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
        seqcount_t              seq;
        struct clock_read_data  read_data[2];
        ktime_t                 wrap_kt;
        unsigned long           rate;

        u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
        /*
         * We don't need to use get_jiffies_64 on 32-bit arches here
         * because we register with BITS_PER_LONG.
         */
        return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
        .read_data[0] = { .mult = NSEC_PER_SEC / HZ,
                          .read_sched_clock = jiffy_sched_clock_read, },
        .actual_read_sched_clock = jiffy_sched_clock_read,
};
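
/*
 * Worked example of the jiffy fallback above (HZ = 100 is only an
 * illustrative config value): mult = NSEC_PER_SEC / HZ = 10,000,000 and
 * shift = 0 (zero-initialized), so each elapsed jiffy contributes
 * exactly 10 ms to sched_clock() until a real counter is registered.
 */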

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
        return (cyc * mult) >> shift;
}
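
/*
 * Example of the scaled math (illustrative values; the real pair is
 * produced by clocks_calc_mult_shift() in sched_clock_register()): for
 * a 1 MHz counter, one exact encoding is mult = 1000 << 22 and
 * shift = 22, giving
 *
 *      cyc_to_ns(cyc) == (cyc * (1000 << 22)) >> 22 == cyc * 1000
 *
 * i.e. 1000 ns per cycle, as expected for a microsecond tick.
 */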

unsigned long long notrace sched_clock(void)
{
        u64 cyc, res;
        unsigned long seq;
        struct clock_read_data *rd;

        do {
                seq = raw_read_seqcount(&cd.seq);
                rd = cd.read_data + (seq & 1);

                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
        } while (read_seqcount_retry(&cd.seq, seq));

        return res;
}

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
        /* update the backup (odd) copy with the new data */
        cd.read_data[1] = *rd;

        /* steer readers towards the odd copy */
        raw_write_seqcount_latch(&cd.seq);

        /* now it's safe for us to update the normal (even) copy */
        cd.read_data[0] = *rd;

        /* switch readers back to the even copy */
        raw_write_seqcount_latch(&cd.seq);
}
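
/*
 * Illustrative timeline of one update (readers in sched_clock() pick
 * read_data[seq & 1] and retry if seq changes under them):
 *
 *      seq == 2N   (even)      readers use read_data[0]
 *      write read_data[1]      readers unaffected
 *      latch: seq == 2N + 1    readers move to the fresh read_data[1]
 *      write read_data[0]      readers unaffected
 *      latch: seq == 2N + 2    readers move back to read_data[0]
 *
 * At every instant the copy selected by (seq & 1) is stable, so even an
 * NMI that interrupts the updater mid-way reads consistent data.
 */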

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
        u64 cyc;
        u64 ns;
        struct clock_read_data rd;

        rd = cd.read_data[0];

        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

        rd.epoch_ns = ns;
        rd.epoch_cyc = cyc;

        update_clock_read_data(&rd);
}
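
/*
 * Periodic callback that refreshes the epoch before the hardware
 * counter can wrap. It re-arms itself every wrap_kt, the safe interval
 * derived from the counter width by clocks_calc_max_nsecs() in
 * sched_clock_register().
 */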

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
        update_sched_clock();
        hrtimer_forward_now(hrt, cd.wrap_kt);

        return HRTIMER_RESTART;
}

void __init
sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
        u64 res, wrap, new_mask, new_epoch, cyc, ns;
        u32 new_mult, new_shift;
        unsigned long r;
        char r_unit;
        struct clock_read_data rd;

        if (cd.rate > rate)
                return;

        WARN_ON(!irqs_disabled());

        /* Calculate the mult/shift to convert counter ticks to ns. */
        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

        new_mask = CLOCKSOURCE_MASK(bits);
        cd.rate = rate;

        /* Calculate how many nanosecs until we risk wrapping */
        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
        cd.wrap_kt = ns_to_ktime(wrap);

        rd = cd.read_data[0];

        /* Update epoch for new counter and update 'epoch_ns' from old counter */
        new_epoch = read();
        cyc = cd.actual_read_sched_clock();
        ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
        cd.actual_read_sched_clock = read;

        rd.read_sched_clock     = read;
        rd.sched_clock_mask     = new_mask;
        rd.mult                 = new_mult;
        rd.shift                = new_shift;
        rd.epoch_cyc            = new_epoch;
        rd.epoch_ns             = ns;

        update_clock_read_data(&rd);

        if (sched_clock_timer.function != NULL) {
                /* update timeout for clock wrap */
                hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        }

        r = rate;
        if (r >= 4000000) {
                r /= 1000000;
                r_unit = 'M';
        } else if (r >= 1000) {
                r /= 1000;
                r_unit = 'k';
        } else {
                r_unit = ' ';
        }

        /* Calculate the ns resolution of this counter */
        res = cyc_to_ns(1ULL, new_mult, new_shift);

        pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                bits, r, r_unit, res, wrap);

        /* Enable IRQ time accounting if we have a fast enough sched_clock() */
        if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                enable_sched_clock_irqtime();

        pr_debug("Registered %pF as sched_clock source\n", read);
}
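
/*
 * Driver-side usage sketch (hypothetical device, for illustration
 * only): a platform with a free-running 56-bit counter ticking at
 * 24 MHz would register it early in boot, with IRQs still disabled,
 * along these lines:
 *
 *      static void __iomem *my_timer_base;     // hypothetical mapping
 *
 *      static u64 notrace my_timer_read(void)
 *      {
 *              return readq_relaxed(my_timer_base);
 *      }
 *
 *      sched_clock_register(my_timer_read, 56, 24000000);
 *
 * The read function should be notrace and must be safe to call from
 * NMI context, since sched_clock() may be invoked there.
 */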

void __init sched_clock_postinit(void)
{
        /*
         * If no sched_clock() function has been provided by this point,
         * make the jiffy-based fallback the final one.
         */
        if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
                sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

        update_sched_clock();

        /*
         * Start the timer that keeps sched_clock() properly updated;
         * the update_sched_clock() call above set the initial epoch.
         */
        hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        sched_clock_timer.function = sched_clock_poll;
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
        unsigned long seq = raw_read_seqcount(&cd.seq);

        return cd.read_data[seq & 1].epoch_cyc;
}
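
/*
 * Suspend/resume handling: on suspend the epoch is brought up to date
 * and the read function is swapped for suspended_sched_clock_read(), so
 * sched_clock() keeps returning the last pre-suspend value. On resume,
 * epoch_cyc is resampled from the hardware before the real read
 * function is restored, so to callers time appears to have stood still
 * for the duration of the suspend.
 */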

static int sched_clock_suspend(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        update_sched_clock();
        hrtimer_cancel(&sched_clock_timer);
        rd->read_sched_clock = suspended_sched_clock_read;

        return 0;
}

static void sched_clock_resume(void)
{
        struct clock_read_data *rd = &cd.read_data[0];

        rd->epoch_cyc = cd.actual_read_sched_clock();
        hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        rd->read_sched_clock = cd.actual_read_sched_clock;
}

static struct syscore_ops sched_clock_ops = {
        .suspend        = sched_clock_suspend,
        .resume         = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
        register_syscore_ops(&sched_clock_ops);

        return 0;
}
device_initcall(sched_clock_syscore_init);