linux/arch/arm64/include/asm/arch_timer.h
/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ARCH_TIMER_H
#define __ASM_ARCH_TIMER_H

#include <asm/barrier.h>
#include <asm/sysreg.h>

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <clocksource/arm_arch_timer.h>

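/*
 * Some platforms have timers whose counter or timer-value reads can
 * return wrong results. When CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND is
 * enabled, affected CPUs register a per-CPU workaround whose hooks
 * (declared further down) override the plain sysreg accessors.
 */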
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
#define has_erratum_handler(h)                                          \
        ({                                                              \
                const struct arch_timer_erratum_workaround *__wa;       \
                __wa = __this_cpu_read(timer_unstable_counter_workaround); \
                (__wa && __wa->h);                                      \
        })

#define erratum_handler(h)                                              \
        ({                                                              \
                const struct arch_timer_erratum_workaround *__wa;       \
                __wa = __this_cpu_read(timer_unstable_counter_workaround); \
                (__wa && __wa->h) ? __wa->h : arch_timer_##h;           \
        })

#else
#define has_erratum_handler(h)                     false
#define erratum_handler(h)                         (arch_timer_##h)
#endif
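
/*
 * For example (illustrative only), erratum_handler(read_cntvct_el0)
 * evaluates to the registered workaround's read_cntvct_el0 hook when
 * this CPU has one, and to the default arch_timer_read_cntvct_el0()
 * accessor below otherwise.
 */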

/*
 * How a workaround is matched to the running system: by device tree,
 * by a local CPU capability id, or by ACPI OEM table information.
 */
enum arch_timer_erratum_match_type {
        ate_match_dt,
        ate_match_local_cap_id,
        ate_match_acpi_oem_info,
};

struct clock_event_device;

struct arch_timer_erratum_workaround {
        enum arch_timer_erratum_match_type match_type;
        const void *id;         /* interpretation depends on match_type */
        const char *desc;
        /* Hooks overriding the plain timer/counter sysreg reads */
        u32 (*read_cntp_tval_el0)(void);
        u32 (*read_cntv_tval_el0)(void);
        u64 (*read_cntpct_el0)(void);
        u64 (*read_cntvct_el0)(void);
        /* Hooks overriding the clock_event_device programming paths */
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
};

DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
                timer_unstable_counter_workaround);
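
/*
 * NULL on CPUs without a registered workaround; the
 * has_erratum_handler()/erratum_handler() checks above rely on that.
 */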

/* inline sysreg accessors that make erratum_handler() work */
static inline notrace u32 arch_timer_read_cntp_tval_el0(void)
{
        return read_sysreg(cntp_tval_el0);
}

static inline notrace u32 arch_timer_read_cntv_tval_el0(void)
{
        return read_sysreg(cntv_tval_el0);
}

static inline notrace u64 arch_timer_read_cntpct_el0(void)
{
        return read_sysreg(cntpct_el0);
}

static inline notrace u64 arch_timer_read_cntvct_el0(void)
{
        return read_sysreg(cntvct_el0);
}

#define arch_timer_reg_read_stable(reg)                                 \
        ({                                                              \
                u64 _val;                                               \
                                                                        \
                preempt_disable_notrace();                              \
                _val = erratum_handler(read_ ## reg)();                 \
                preempt_enable_notrace();                               \
                                                                        \
                _val;                                                   \
        })
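
/*
 * Example (illustrative only): arch_timer_reg_read_stable(cntvct_el0)
 * disables preemption so the task cannot migrate between reading the
 * per-CPU workaround pointer and calling the chosen handler, then
 * invokes either the workaround hook or arch_timer_read_cntvct_el0().
 */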

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntp_ctl_el0);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        write_sysreg(val, cntp_tval_el0);
                        break;
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        write_sysreg(val, cntv_ctl_el0);
                        break;
                case ARCH_TIMER_REG_TVAL:
                        write_sysreg(val, cntv_tval_el0);
                        break;
                }
        }

        isb();
}
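
/*
 * Example (hypothetical caller, not part of this header): programming
 * the virtual timer to fire in 'evt' ticks, then enabling it:
 *
 *      arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
 *                                ARCH_TIMER_REG_TVAL, evt);
 *      arch_timer_reg_write_cp15(ARCH_TIMER_VIRT_ACCESS,
 *                                ARCH_TIMER_REG_CTRL,
 *                                ARCH_TIMER_CTRL_ENABLE);
 */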

static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
        if (access == ARCH_TIMER_PHYS_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntp_ctl_el0);
                case ARCH_TIMER_REG_TVAL:
                        return arch_timer_reg_read_stable(cntp_tval_el0);
                }
        } else if (access == ARCH_TIMER_VIRT_ACCESS) {
                switch (reg) {
                case ARCH_TIMER_REG_CTRL:
                        return read_sysreg(cntv_ctl_el0);
                case ARCH_TIMER_REG_TVAL:
                        return arch_timer_reg_read_stable(cntv_tval_el0);
                }
        }

        BUG();
}
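
/*
 * Note that only the TVAL reads go through arch_timer_reg_read_stable():
 * the known counter errata affect timer/counter value reads, and the
 * workaround structure above has no hook for CTRL accesses.
 */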

static inline u32 arch_timer_get_cntfrq(void)
{
        return read_sysreg(cntfrq_el0);
}

static inline u32 arch_timer_get_cntkctl(void)
{
        return read_sysreg(cntkctl_el1);
}

static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
        write_sysreg(cntkctl, cntkctl_el1);
        isb();
}

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
#define arch_counter_enforce_ordering(val) do {                         \
        u64 tmp, _val = (val);                                          \
                                                                        \
        asm volatile(                                                   \
        "       eor     %0, %1, %1\n"                                   \
        "       add     %0, sp, %0\n"                                   \
        "       ldr     xzr, [%0]"                                      \
        : "=r" (tmp) : "r" (_val));                                     \
} while (0)
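
/*
 * How the trick works (informal): "eor %0, %1, %1" computes val ^ val,
 * which is always zero but remains data-dependent on the counter value;
 * adding that zero to sp and loading from the result turns the data
 * dependency into an address dependency on a real load, which later
 * memory barriers are architecturally required to order against.
 */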

static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
        u64 cnt;

        isb();
        cnt = arch_timer_reg_read_stable(cntpct_el0);
        arch_counter_enforce_ordering(cnt);
        return cnt;
}

static __always_inline u64 __arch_counter_get_cntpct(void)
{
        u64 cnt;

        isb();
        cnt = read_sysreg(cntpct_el0);
        arch_counter_enforce_ordering(cnt);
        return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
        u64 cnt;

        isb();
        cnt = arch_timer_reg_read_stable(cntvct_el0);
        arch_counter_enforce_ordering(cnt);
        return cnt;
}

static __always_inline u64 __arch_counter_get_cntvct(void)
{
        u64 cnt;

        isb();
        cnt = read_sysreg(cntvct_el0);
        arch_counter_enforce_ordering(cnt);
        return cnt;
}
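
/*
 * Callers are expected to use the _stable variants when an erratum
 * workaround may be registered, and the plain variants otherwise; the
 * leading isb() keeps the counter read from being speculated ahead of
 * earlier instructions.
 */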

#undef arch_counter_enforce_ordering

static inline int arch_timer_arch_init(void)
{
        return 0;
}

#endif /* __ASM_ARCH_TIMER_H */