linux/arch/arm/include/asm/arch_timer.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASMARM_ARCH_TIMER_H
#define __ASMARM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/errno.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

#ifdef CONFIG_ARM_ARCH_TIMER
int arch_timer_arch_init(void);

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:      /* CNTP_CTL */
                        asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:      /* CNTP_TVAL */
                        asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
                        break;
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:      /* CNTV_CTL */
                        asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:      /* CNTV_TVAL */
                        asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
                        break;
                }
        }

        /* Ensure the CP15 write has taken effect before continuing */
        isb();
}
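
/*
 * Usage sketch, not part of the original header: program and start the
 * physical timer with its interrupt unmasked. The helper name is
 * hypothetical; ARCH_TIMER_CTRL_ENABLE is assumed to be provided by
 * <clocksource/arm_arch_timer.h>.
 */
static __always_inline void arch_timer_example_start_phys(u32 tval)
{
        /* Load the downcounter first, then enable with IMASK left clear */
        arch_timer_reg_write_cp15(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_TVAL, tval);
        arch_timer_reg_write_cp15(ARCH_TIMER_PHYS_ACCESS, ARCH_TIMER_REG_CTRL,
                                  ARCH_TIMER_CTRL_ENABLE);
}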

static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
        u32 val = 0;

        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:      /* CNTP_CTL */
                        asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:      /* CNTP_TVAL */
                        asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
                        break;
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:      /* CNTV_CTL */
                        asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
                        break;
                case ARCH_TIMER_REG_TVAL:      /* CNTV_TVAL */
                        asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
                        break;
                }
        }

        return val;
}
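
/*
 * Usage sketch, not part of the original header: read-modify-write of the
 * virtual timer control register to mask its interrupt. The helper name is
 * hypothetical; ARCH_TIMER_CTRL_IT_MASK is assumed to be provided by
 * <clocksource/arm_arch_timer.h>.
 */
static __always_inline void arch_timer_example_mask_virt_irq(void)
{
        u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL);

        ctrl |= ARCH_TIMER_CTRL_IT_MASK;
        arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
}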

static inline u32 arch_timer_get_cntfrq(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));      /* CNTFRQ */
        return val;
}

static inline u64 arch_counter_get_cntpct(void)
{
        u64 cval;

        /* Order the counter read after preceding instructions */
        isb();
        asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));     /* CNTPCT */
        return cval;
}

static inline u64 arch_counter_get_cntvct(void)
{
        u64 cval;

        /* Order the counter read after preceding instructions */
        isb();
        asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));     /* CNTVCT */
        return cval;
}
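
/*
 * Usage sketch, not part of the original header: busy-wait for roughly one
 * millisecond by polling the virtual counter. The helper name is
 * hypothetical; only 32-bit division on the frequency is used, so no 64-bit
 * division helpers are required.
 */
static inline void arch_timer_example_spin_1ms(void)
{
        u64 start = arch_counter_get_cntvct();
        u32 ticks_per_ms = arch_timer_get_cntfrq() / 1000;

        while (arch_counter_get_cntvct() - start < ticks_per_ms)
                ;       /* each poll re-reads CNTVCT via the asm volatile above */
}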

static inline u32 arch_timer_get_cntkctl(void)
{
        u32 cntkctl;
        asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));  /* CNTKCTL */
        return cntkctl;
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
        asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl)); /* CNTKCTL */
        isb();
}
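
/*
 * Usage sketch, not part of the original header: grant user space (PL0) read
 * access to the virtual counter. The helper name is hypothetical;
 * ARCH_TIMER_USR_VCT_ACCESS_EN is assumed to be provided by
 * <clocksource/arm_arch_timer.h>.
 */
static inline void arch_timer_example_enable_user_vct(void)
{
        u32 cntkctl = arch_timer_get_cntkctl();

        cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
        arch_timer_set_cntkctl(cntkctl);
}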

#endif /* CONFIG_ARM_ARCH_TIMER */

#endif /* __ASMARM_ARCH_TIMER_H */