/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;
struct clocksource;

#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
#include <asm/clocksource.h>
#endif

/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *	Provides completely state-free accessors to the underlying hardware.
 *	Depending on which hardware it reads, the cycle counter may wrap
 *	around quickly. Locking rules (if necessary) have to be defined
 *	by the implementor and user of specific instances of this API.
 *
 * @read:		returns the current cycle value
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters,
 *			see CLOCKSOURCE_MASK() helper macro
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
	cycle_t (*read)(const struct cyclecounter *cc);
	cycle_t mask;
	u32 mult;
	u32 shift;
};

/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *	Contains the state needed by timecounter_read() to detect
 *	cycle counter wrap around. Initialize with
 *	timecounter_init(). Also used to convert cycle counts into the
 *	corresponding nanosecond counts with timecounter_cyc2time(). Users
 *	of this code are responsible for initializing the underlying
 *	cycle counter hardware, locking issues and reading the time
 *	more often than the cycle counter wraps around. The nanosecond
 *	counter will only wrap around after ~585 years.
 *
 * @cc:			the cycle counter used by this instance
 * @cycle_last:		most recent cycle counter value seen by
 *			timecounter_read()
 * @nsec:		continuously increasing count
 */
struct timecounter {
	const struct cyclecounter *cc;
	cycle_t cycle_last;
	u64 nsec;
};

/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:		Pointer to cycle counter.
 * @cycles:	Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
				      cycle_t cycles)
{
	u64 ret = (u64)cycles;
	ret = (ret * cc->mult) >> cc->shift;
	return ret;
}

/**
 * timecounter_init - initialize a time counter
 * @tc:			Pointer to time counter which is to be initialized/reset
 * @cc:			A cycle counter, ready to be used.
 * @start_tstamp:	Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
			     const struct cyclecounter *cc,
			     u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *			plus the initial time stamp
 * @tc:		Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);
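
/*
 * Example: typical cyclecounter/timecounter usage. This is an
 * illustrative sketch only; the foo_* names, the register base and the
 * counter rate are hypothetical and not part of this API.
 *
 *	static void __iomem *foo_counter_base;
 *
 *	static cycle_t foo_cc_read(const struct cyclecounter *cc)
 *	{
 *		return (cycle_t)readl(foo_counter_base) & cc->mask;
 *	}
 *
 *	static struct cyclecounter foo_cc = {
 *		.read	= foo_cc_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.shift	= 22,
 *	};
 *
 *	static struct timecounter foo_tc;
 *
 *	foo_cc.mult = clocksource_hz2mult(foo_rate_hz, foo_cc.shift);
 *	timecounter_init(&foo_tc, &foo_cc, start_ns);
 *	...
 *	ns = timecounter_read(&foo_tc);
 *
 * The caller is responsible for any locking and for calling
 * timecounter_read() more often than the hardware counter wraps.
 */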

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *			time base as values returned by
 *			timecounter_read()
 * @tc:			Pointer to time counter.
 * @cycle_tstamp:	a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == cs->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
				cycle_t cycle_tstamp);

/**
 * struct clocksource - hardware abstraction for a free running counter
 *	Provides mostly state-free accessors to the underlying hardware.
 *	This is the structure used for system time.
 *
 * @name:		ptr to clocksource name
 * @list:		list head for registration
 * @rating:		rating value for selection (higher is better)
 *			To avoid rating inflation the following
 *			list should give you a guide as to how
 *			to assign your clocksource a rating
 *			1-99: Unfit for real use
 *				Only available for bootup and testing purposes.
 *			100-199: Base level usability.
 *				Functional for real use, but not desired.
 *			200-299: Good.
 *				A correct and usable clocksource.
 *			300-399: Desired.
 *				A reasonably fast and accurate clocksource.
 *			400-499: Perfect
 *				The ideal clocksource. A must-use where
 *				available.
 * @read:		returns a cycle value, passes clocksource as argument
 * @cycle_last:		most recently read cycle value, maintained by the
 *			timekeeping core
 * @enable:		optional function to enable the clocksource
 * @disable:		optional function to disable the clocksource
 * @mask:		bitmask for two's complement
 *			subtraction of non 64 bit counters
 * @mult:		cycle to nanosecond multiplier
 * @shift:		cycle to nanosecond divisor (power of two)
 * @max_idle_ns:	max idle time permitted by the clocksource (nsecs)
 * @flags:		flags describing special properties
 * @archdata:		arch-specific data
 * @suspend:		suspend function for the clocksource, if necessary
 * @resume:		resume function for the clocksource, if necessary
 */
struct clocksource {
	/*
	 * Hotpath data, fits in a single cache line when the
	 * clocksource itself is cacheline aligned.
	 */
	cycle_t (*read)(struct clocksource *cs);
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;
	u64 max_idle_ns;

#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
	struct arch_clocksource_data archdata;
#endif

	const char *name;
	struct list_head list;
	int rating;
	int (*enable)(struct clocksource *cs);
	void (*disable)(struct clocksource *cs);
	unsigned long flags;
	void (*suspend)(struct clocksource *cs);
	void (*resume)(struct clocksource *cs);

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
	/* Watchdog related data, used by the framework */
	struct list_head wd_list;
	cycle_t cs_last;
	cycle_t wd_last;
#endif
} ____cacheline_aligned;

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS		0x01
#define CLOCK_SOURCE_MUST_VERIFY		0x02

#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
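
/*
 * Example: a minimal clocksource for a hypothetical free running 32 bit
 * counter (illustrative sketch only; the bar_* names, register base and
 * the 24 MHz rate are made up). mult/shift are normally computed by the
 * core when registering with clocksource_register_hz()/khz(), declared
 * further down in this file.
 *
 *	static void __iomem *bar_counter_base;
 *
 *	static cycle_t bar_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)readl(bar_counter_base);
 *	}
 *
 *	static struct clocksource bar_cs = {
 *		.name	= "bar-timer",
 *		.rating	= 300,
 *		.read	= bar_cs_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&bar_cs, 24000000);
 */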

/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:		Clocksource frequency in KHz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a
 * clocksource multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}

/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:			Clocksource frequency in Hz
 * @shift_constant:	Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
	/*  hz = cyc/(Billion ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Billion/hz * 2^shift
	 *  mult = 1000000000 * 2^shift / hz
	 *  mult = (1000000000<<shift) / hz
	 */
	u64 tmp = ((u64)1000000000) << shift_constant;

	tmp += hz/2; /* round for do_div */
	do_div(tmp, hz);

	return (u32)tmp;
}

/**
 * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
 * @cycles:	Cycles
 * @mult:	cycle to nanosecond multiplier
 * @shift:	cycle to nanosecond divisor (power of two)
 *
 * Converts cycles to nanoseconds, using the given mult and shift.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
{
	return ((u64) cycles * mult) >> shift;
}
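
/*
 * Worked example (illustrative numbers only): for a 10 MHz counter and
 * shift = 22, clocksource_hz2mult(10000000, 22) yields
 *
 *	mult = (1000000000 << 22) / 10000000 = 419430400
 *
 * and clocksource_cyc2ns() converts 10000 cycles (1 ms) back to
 *
 *	(10000 * 419430400) >> 22 = 1000000 ns
 *
 * A larger shift gives mult more fractional precision, but also makes
 * the 64 bit intermediate product in clocksource_cyc2ns() overflow for
 * smaller cycle deltas; clocks_calc_mult_shift() below balances the two
 * via its minsec argument.
 */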

extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);

extern void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);

/*
 * Don't call __clocksource_register_scale directly, use
 * clocksource_register_hz/khz
 */
extern int
__clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq);
extern void
__clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq);

static inline int clocksource_register_hz(struct clocksource *cs, u32 hz)
{
	return __clocksource_register_scale(cs, 1, hz);
}

static inline int clocksource_register_khz(struct clocksource *cs, u32 khz)
{
	return __clocksource_register_scale(cs, 1000, khz);
}

static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz)
{
	__clocksource_updatefreq_scale(cs, 1, hz);
}

static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
{
	__clocksource_updatefreq_scale(cs, 1000, khz);
}

static inline void
clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec)
{
	return clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				      NSEC_PER_SEC, minsec);
}

#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
			struct clocksource *c, u32 mult)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

extern void timekeeping_notify(struct clocksource *clock);

extern cycle_t clocksource_mmio_readl_up(struct clocksource *);
extern cycle_t clocksource_mmio_readl_down(struct clocksource *);
extern cycle_t clocksource_mmio_readw_up(struct clocksource *);
extern cycle_t clocksource_mmio_readw_down(struct clocksource *);

extern int clocksource_mmio_init(void __iomem *, const char *,
	unsigned long, int, unsigned, cycle_t (*)(struct clocksource *));

extern int clocksource_i8253_init(void);

#endif /* _LINUX_CLOCKSOURCE_H */