/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
19#ifndef __ASM_ARCH_TIMER_H
20#define __ASM_ARCH_TIMER_H
21
22#include <asm/barrier.h>
23#include <asm/sysreg.h>
24
25#include <linux/bug.h>
26#include <linux/init.h>
27#include <linux/jump_label.h>
28#include <linux/smp.h>
29#include <linux/types.h>
30
31#include <clocksource/arm_arch_timer.h>
32
/*
 * When CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND is enabled, counter reads may
 * be routed through out-of-line per-erratum accessors.  The static key is
 * presumably enabled by the clocksource driver when an affected CPU is
 * detected — the fast path stays a single patched branch.
 */
#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
extern struct static_key_false arch_timer_read_ool_enabled;
#define needs_unstable_timer_counter_workaround() \
	static_branch_unlikely(&arch_timer_read_ool_enabled)
#else
/* No workaround support compiled in: the check folds away to false. */
#define needs_unstable_timer_counter_workaround()  false
#endif
40
/*
 * How an erratum workaround entry is matched against the running system;
 * determines how the opaque 'id' field of the workaround is interpreted.
 */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* match against device-tree data */
	ate_match_local_cap_id,		/* match a local CPU capability id */
	ate_match_acpi_oem_info,	/* match ACPI OEM table information */
};
46
struct clock_event_device;

/*
 * Describes one timer erratum workaround: how it is matched (match_type
 * plus the opaque id compared against it) and optional replacement
 * accessors.  A NULL hook means the regular sysreg access is used
 * (see arch_timer_reg_read_stable()).
 */
struct arch_timer_erratum_workaround {
	enum arch_timer_erratum_match_type match_type;
	const void *id;		/* interpreted according to match_type */
	const char *desc;	/* human-readable description of the erratum */
	u32 (*read_cntp_tval_el0)(void);	/* replacement CNTP_TVAL_EL0 read */
	u32 (*read_cntv_tval_el0)(void);	/* replacement CNTV_TVAL_EL0 read */
	u64 (*read_cntvct_el0)(void);		/* replacement CNTVCT_EL0 read */
	int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
	int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
};
59
/* Per-CPU pointer to the active workaround; NULL when none applies. */
DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
		timer_unstable_counter_workaround);
62
/*
 * Read the timer/counter system register @reg, routing through the
 * per-CPU erratum accessor when one is installed.  The common case is a
 * static branch straight to read_sysreg(); on the workaround path,
 * preemption is disabled (via the notrace helpers) only around the
 * per-CPU lookup and the read, so the pointer and the access stay on
 * the same CPU.  A workaround entry with a NULL hook for @reg falls
 * back to the plain sysreg read.
 */
#define arch_timer_reg_read_stable(reg) \
({ \
	u64 _val; \
	if (needs_unstable_timer_counter_workaround()) { \
		const struct arch_timer_erratum_workaround *wa; \
		preempt_disable_notrace(); \
		wa = __this_cpu_read(timer_unstable_counter_workaround); \
		if (wa && wa->read_##reg) \
			_val = wa->read_##reg(); \
		else \
			_val = read_sysreg(reg); \
		preempt_enable_notrace(); \
	} else { \
		_val = read_sysreg(reg); \
	} \
	_val; \
})

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the function.
 */
/*
 * Write @val to the timer register selected by (@access, @reg):
 * physical (CNTP_*) or virtual (CNTV_*) CTRL/TVAL.  Unknown
 * access/reg combinations are silently ignored, but the isb() still
 * executes.
 */
static __always_inline
void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntp_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntp_tval_el0);
			break;
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			write_sysreg(val, cntv_ctl_el0);
			break;
		case ARCH_TIMER_REG_TVAL:
			write_sysreg(val, cntv_tval_el0);
			break;
		}
	}

	/* Ensure the register write has taken effect before returning. */
	isb();
}
111
/*
 * Read the timer register selected by (@access, @reg).  TVAL reads go
 * through arch_timer_reg_read_stable() so erratum workarounds apply;
 * CTRL reads are plain sysreg accesses.  Any other combination is a
 * programming error and hits BUG().
 */
static __always_inline
u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntp_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			return arch_timer_reg_read_stable(cntp_tval_el0);
		}
	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			return read_sysreg(cntv_ctl_el0);
		case ARCH_TIMER_REG_TVAL:
			return arch_timer_reg_read_stable(cntv_tval_el0);
		}
	}

	BUG();
}
133
134static inline u32 arch_timer_get_cntfrq(void)
135{
136 return read_sysreg(cntfrq_el0);
137}
138
139static inline u32 arch_timer_get_cntkctl(void)
140{
141 return read_sysreg(cntkctl_el1);
142}
143
/* Write @cntkctl to the kernel counter-timer control register (CNTKCTL_EL1). */
static inline void arch_timer_set_cntkctl(u32 cntkctl)
{
	write_sysreg(cntkctl, cntkctl_el1);
}
148
static inline u64 arch_counter_get_cntpct(void)
{
	/*
	 * AArch64 kernel and user space mandate the use of CNTVCT:
	 * reading the physical counter here is a programming error.
	 */
	BUG();
	return 0;	/* unreachable; satisfies the non-void return */
}
157
/* Read the virtual counter (CNTVCT_EL0), applying any erratum workaround. */
static inline u64 arch_counter_get_cntvct(void)
{
	/* Order the counter read against preceding instructions. */
	isb();
	return arch_timer_reg_read_stable(cntvct_el0);
}
163
/*
 * Architecture-specific timer init hook; no work is needed on this
 * architecture, so it unconditionally reports success.
 */
static inline int arch_timer_arch_init(void)
{
	int ret = 0;

	return ret;
}
168
169#endif
170