/*
 * linux/include/linux/sched/clock.h
 */
   1#ifndef _LINUX_SCHED_CLOCK_H
   2#define _LINUX_SCHED_CLOCK_H
   3
   4#include <linux/smp.h>
   5
   6/*
   7 * Do not use outside of architecture code which knows its limitations.
   8 *
   9 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, and use across CPUs (which you should not do) requires disabling IRQs.
  11 *
  12 * Please use one of the three interfaces below.
  13 */
  14extern unsigned long long notrace sched_clock(void);
  15
  16/*
  17 * See the comment in kernel/sched/clock.c
  18 */
  19extern u64 running_clock(void);
  20extern u64 sched_clock_cpu(int cpu);
  21
  22
  23extern void sched_clock_init(void);
  24
  25#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  26static inline void sched_clock_tick(void)
  27{
  28}
  29
  30static inline void clear_sched_clock_stable(void)
  31{
  32}
  33
  34static inline void sched_clock_idle_sleep_event(void)
  35{
  36}
  37
  38static inline void sched_clock_idle_wakeup_event(void)
  39{
  40}
  41
  42static inline u64 cpu_clock(int cpu)
  43{
  44        return sched_clock();
  45}
  46
  47static inline u64 local_clock(void)
  48{
  49        return sched_clock();
  50}
  51#else
  52extern int sched_clock_stable(void);
  53extern void clear_sched_clock_stable(void);
  54
  55/*
  56 * When sched_clock_stable(), __sched_clock_offset provides the offset
  57 * between local_clock() and sched_clock().
  58 */
  59extern u64 __sched_clock_offset;
  60
  61extern void sched_clock_tick(void);
  62extern void sched_clock_tick_stable(void);
  63extern void sched_clock_idle_sleep_event(void);
  64extern void sched_clock_idle_wakeup_event(void);
  65
  66/*
  67 * As outlined in clock.c, provides a fast, high resolution, nanosecond
  68 * time source that is monotonic per cpu argument and has bounded drift
  69 * between cpus.
  70 *
  71 * ######################### BIG FAT WARNING ##########################
  72 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
  73 * # go backwards !!                                                  #
  74 * ####################################################################
  75 */
  76static inline u64 cpu_clock(int cpu)
  77{
  78        return sched_clock_cpu(cpu);
  79}
  80
  81static inline u64 local_clock(void)
  82{
  83        return sched_clock_cpu(raw_smp_processor_id());
  84}
  85#endif
  86
  87#ifdef CONFIG_IRQ_TIME_ACCOUNTING
  88/*
  89 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
  90 * The reason for this explicit opt-in is not to have perf penalty with
  91 * slow sched_clocks.
  92 */
  93extern void enable_sched_clock_irqtime(void);
  94extern void disable_sched_clock_irqtime(void);
  95#else
  96static inline void enable_sched_clock_irqtime(void) {}
  97static inline void disable_sched_clock_irqtime(void) {}
  98#endif
  99
 100#endif /* _LINUX_SCHED_CLOCK_H */