linux/arch/arm/kernel/sched_clock.c
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

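/*
 * The current epoch is kept as a (counter, ns) pair.  epoch_cyc_copy
 * duplicates epoch_cyc so that lockless readers can detect a concurrent
 * update: cyc_to_sched_clock() retries its load while epoch_cyc and
 * epoch_cyc_copy disagree (see update_sched_clock() below).
 */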
struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	unsigned long rate;
	u32 mult;
	u32 shift;
	bool suspended;
	bool needs_suspend;
};

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
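/*
 * irqtime boot-time parameter: 1 forces IRQ time accounting on, 0 leaves
 * it off, and the default of -1 enables it automatically when the counter
 * rate is at least 1 MHz (see setup_sched_clock()).
 */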
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

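/*
 * Default to counting jiffies: with the default shift of 0, a mult of
 * NSEC_PER_SEC / HZ turns a jiffy count directly into nanoseconds in
 * cyc_to_ns().
 */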
static struct clock_data cd = {
	.mult	= NSEC_PER_SEC / HZ,
};

static u32 __read_mostly sched_clock_mask = 0xffffffff;

static u32 notrace jiffy_sched_clock_read(void)
{
	return (u32)(jiffies - INITIAL_JIFFIES);
}

static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
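
/*
 * For instance, a 24 MHz counter ticks every ~41.67ns;
 * clocks_calc_mult_shift() picks mult/shift so that (cyc * mult) >> shift
 * closely approximates cyc * NSEC_PER_SEC / rate without a division in
 * this fast path.
 */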

static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	if (cd.suspended)
		return cd.epoch_ns;

	/*
	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd.epoch_cyc;
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}

/*
 * Atomically update the sched_clock epoch.
 */
static void notrace update_sched_clock(void)
{
	unsigned long flags;
	u32 cyc;
	u64 ns;

	cyc = read_sched_clock();
	ns = cd.epoch_ns +
		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd.epoch_cyc_copy = cyc;
	smp_wmb();
	cd.epoch_ns = ns;
	smp_wmb();
	cd.epoch_cyc = cyc;
	raw_local_irq_restore(flags);
}

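/*
 * The epoch must be refreshed at least once per counter wrap, otherwise
 * (cyc - epoch_cyc) & mask becomes ambiguous; the timer re-arms itself
 * comfortably inside the wrap period (see setup_sched_clock()).
 */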
static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	update_sched_clock();
}

void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	if (cd.rate > rate)
		return;

	BUG_ON(bits > 32);
	WARN_ON(!irqs_disabled());
	read_sched_clock = read;
	/* 1ULL keeps the shift well-defined when bits == 32 */
	sched_clock_mask = (u32)((1ULL << bits) - 1);
	cd.rate = rate;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else if (r >= 1000) {
		r /= 1000;
		r_unit = 'k';
	} else
		r_unit = ' ';

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * set the initial epoch.  Poll at 90% of the wrap period so the
	 * update always lands before the counter wraps.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	update_sched_clock();

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd.epoch_ns = 0;

	/* Enable IRQ time accounting if we have a fast enough sched_clock */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	pr_debug("Registered %pF as sched_clock source\n", read);
}
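
/*
 * A minimal (hypothetical) sketch of how a platform timer driver might
 * register a 32-bit free-running counter; the names below are purely
 * illustrative:
 *
 *	static u32 notrace my_counter_read(void)
 *	{
 *		return readl_relaxed(my_counter_base);
 *	}
 *
 *	setup_sched_clock(my_counter_read, 32, 24000000);
 */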

static unsigned long long notrace sched_clock_32(void)
{
	u32 cyc = read_sched_clock();
	return cyc_to_sched_clock(cyc, sched_clock_mask);
}

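/*
 * sched_clock() is routed through a function pointer so that a platform
 * can install its own implementation in place of the wrapping 32-bit one.
 */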
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;

unsigned long long notrace sched_clock(void)
{
	return sched_clock_func();
}

void __init sched_clock_postinit(void)
{
	/*
	 * If no sched_clock function has been provided at this point,
	 * make the jiffy-based clock the final one.
	 */
	if (read_sched_clock == jiffy_sched_clock_read)
		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);

	sched_clock_poll(sched_clock_timer.data);
}

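/*
 * The counter may stop or reset across suspend, so sched_clock() returns
 * a frozen epoch_ns while cd.suspended is set; resume re-reads the counter
 * to re-base the epoch before readers are allowed to advance again.
 */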
static int sched_clock_suspend(void)
{
	sched_clock_poll(sched_clock_timer.data);
	cd.suspended = true;
	return 0;
}

static void sched_clock_resume(void)
{
	cd.epoch_cyc = read_sched_clock();
	cd.epoch_cyc_copy = cd.epoch_cyc;
	cd.suspended = false;
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);
	return 0;
}
device_initcall(sched_clock_syscore_init);