#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>

#include <asm/cputype.h>

#define GT_COUNTER0	0x00
#define GT_COUNTER1	0x04

#define GT_CONTROL	0x08
#define GT_CONTROL_TIMER_ENABLE		BIT(0)	/* this bit is NOT banked */
#define GT_CONTROL_COMP_ENABLE		BIT(1)	/* banked per CPU */
#define GT_CONTROL_IRQ_ENABLE		BIT(2)	/* banked per CPU */
#define GT_CONTROL_AUTO_INC		BIT(3)	/* banked per CPU */

#define GT_INT_STATUS	0x0c
#define GT_INT_STATUS_EVENT_FLAG	BIT(0)

#define GT_COMP0	0x10
#define GT_COMP1	0x14
#define GT_AUTO_INC	0x18

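/*
 * The global timer is driven by the ARM peripheral clock (PERIPHCLK).
 * This driver never programs the prescaler field in GT_CONTROL, so the
 * rate of the clock obtained from the device tree is used directly for
 * the clocksource, the clockevents and the delay timer.
 */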
static void __iomem *gt_base;
static unsigned long gt_clk_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;

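/*
 * To read the 64-bit counter consistently:
 *  1. read the upper 32-bit register,
 *  2. read the lower 32-bit register,
 *  3. read the upper 32-bit register again; if it changed, a carry from
 *     the lower word happened in between, so go back to step 2.
 */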
static u64 notrace _gt_counter_read(void)
{
	u64 counter;
	u32 lower;
	u32 upper, old_upper;

	upper = readl_relaxed(gt_base + GT_COUNTER1);
	do {
		old_upper = upper;
		lower = readl_relaxed(gt_base + GT_COUNTER0);
		upper = readl_relaxed(gt_base + GT_COUNTER1);
	} while (upper != old_upper);

	counter = upper;
	counter <<= 32;
	counter |= lower;
	return counter;
}

static u64 gt_counter_read(void)
{
	return _gt_counter_read();
}

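/*
 * To update the comparator without spuriously setting the event flag:
 *  1. clear the Comp Enable bit in the Timer Control Register,
 *  2. write the lower 32-bit Comparator Value Register,
 *  3. write the upper 32-bit Comparator Value Register,
 *  4. set the Comp Enable bit (and, for clockevents, the IRQ Enable bit).
 */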
static void gt_compare_set(unsigned long delta, int periodic)
{
	u64 counter = gt_counter_read();
	unsigned long ctrl;

	counter += delta;
	ctrl = GT_CONTROL_TIMER_ENABLE;
	writel_relaxed(ctrl, gt_base + GT_CONTROL);
	writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0);
	writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1);

	if (periodic) {
		writel_relaxed(delta, gt_base + GT_AUTO_INC);
		ctrl |= GT_CONTROL_AUTO_INC;
	}

	ctrl |= GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE;
	writel_relaxed(ctrl, gt_base + GT_CONTROL);
}

static int gt_clockevent_shutdown(struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = readl(gt_base + GT_CONTROL);
	ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
		  GT_CONTROL_AUTO_INC);
	writel(ctrl, gt_base + GT_CONTROL);
	return 0;
}

static int gt_clockevent_set_periodic(struct clock_event_device *evt)
{
	gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
	return 0;
}

static int gt_clockevent_set_next_event(unsigned long evt,
					struct clock_event_device *unused)
{
	gt_compare_set(evt, 0);
	return 0;
}

static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	if (!(readl_relaxed(gt_base + GT_INT_STATUS) &
				GT_INT_STATUS_EVENT_FLAG))
		return IRQ_NONE;

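	/*
	 * Cortex-A9 erratum 740657: in single-core configurations the
	 * global timer can signal the same compare event twice.  As a
	 * workaround, in one-shot mode push the comparator far into the
	 * future before acknowledging the event flag, so the stale
	 * compare value cannot retrigger the interrupt.
	 */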
	if (clockevent_state_oneshot(evt))
		gt_compare_set(ULONG_MAX, 0);

	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
	evt->event_handler(evt);

	return IRQ_HANDLED;
}

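/*
 * The comparator, its IRQ enable and the auto-increment logic are banked
 * per CPU, so a per-CPU clock_event_device is registered from the CPU
 * hotplug "starting" callback and torn down from the "dying" callback.
 */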
static int gt_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(gt_evt);

	clk->name = "arm_global_timer";
	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
		CLOCK_EVT_FEAT_PERCPU;
	clk->set_state_shutdown = gt_clockevent_shutdown;
	clk->set_state_periodic = gt_clockevent_set_periodic;
	clk->set_state_oneshot = gt_clockevent_shutdown;
	clk->set_state_oneshot_stopped = gt_clockevent_shutdown;
	clk->set_next_event = gt_clockevent_set_next_event;
	clk->cpumask = cpumask_of(cpu);
	clk->rating = 300;
	clk->irq = gt_ppi;
	clockevents_config_and_register(clk, gt_clk_rate,
					1, 0xffffffff);
	enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
	return 0;
}

static int gt_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(gt_evt);

	gt_clockevent_shutdown(clk);
	disable_percpu_irq(clk->irq);
	return 0;
}

static u64 gt_clocksource_read(struct clocksource *cs)
{
	return gt_counter_read();
}

static void gt_resume(struct clocksource *cs)
{
	unsigned long ctrl;

	ctrl = readl(gt_base + GT_CONTROL);
	if (!(ctrl & GT_CONTROL_TIMER_ENABLE))
		/* re-enable the counter if it was stopped across suspend */
		writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
}

static struct clocksource gt_clocksource = {
	.name	= "arm_global_timer",
	.rating	= 300,
	.read	= gt_clocksource_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume	= gt_resume,
};

#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
static u64 notrace gt_sched_clock_read(void)
{
	return _gt_counter_read();
}
#endif

static unsigned long gt_read_long(void)
{
	return readl_relaxed(gt_base + GT_COUNTER0);
}

static struct delay_timer gt_delay_timer = {
	.read_current_timer = gt_read_long,
};

static void __init gt_delay_timer_init(void)
{
	gt_delay_timer.freq = gt_clk_rate;
	register_current_timer_delay(&gt_delay_timer);
}

static int __init gt_clocksource_init(void)
{
	/* disable the timer and clear the counter */
	writel(0, gt_base + GT_CONTROL);
	writel(0, gt_base + GT_COUNTER0);
	writel(0, gt_base + GT_COUNTER1);
	/* enable the free-running counter (this bit is not banked per CPU) */
	writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);

#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
#endif
	return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
}

static int __init global_timer_of_register(struct device_node *np)
{
	struct clk *gt_clk;
	int err = 0;

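	/*
	 * On Cortex-A9 revisions before r2p0 the comparator fires only on
	 * an exact counter match rather than on "greater than or equal",
	 * which this driver cannot work with, so such parts are rejected.
	 */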
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
	    && (read_cpuid_id() & 0xf0000f) < 0x200000) {
		pr_warn("global-timer: this CPU version is not supported\n");
		return -ENOSYS;
	}

	gt_ppi = irq_of_parse_and_map(np, 0);
	if (!gt_ppi) {
		pr_warn("global-timer: unable to parse irq\n");
		return -EINVAL;
	}

	gt_base = of_iomap(np, 0);
	if (!gt_base) {
		pr_warn("global-timer: invalid base address\n");
		return -ENXIO;
	}

	gt_clk = of_clk_get(np, 0);
	if (!IS_ERR(gt_clk)) {
		err = clk_prepare_enable(gt_clk);
		if (err)
			goto out_unmap;
	} else {
		pr_warn("global-timer: clk not found\n");
		err = -EINVAL;
		goto out_unmap;
	}

	gt_clk_rate = clk_get_rate(gt_clk);
	gt_evt = alloc_percpu(struct clock_event_device);
	if (!gt_evt) {
		pr_warn("global-timer: can't allocate memory\n");
		err = -ENOMEM;
		goto out_clk;
	}

	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
				 "gt", gt_evt);
	if (err) {
		pr_warn("global-timer: can't register interrupt %d (%d)\n",
			gt_ppi, err);
		goto out_free;
	}

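	/*
	 * Enable the counter and register the clocksource on the boot CPU;
	 * per-CPU clockevents are then brought up via the hotplug callbacks.
	 */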
	err = gt_clocksource_init();
	if (err)
		goto out_irq;

	err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
				"clockevents/arm/global_timer:starting",
				gt_starting_cpu, gt_dying_cpu);
	if (err)
		goto out_irq;

	gt_delay_timer_init();

	return 0;

out_irq:
	free_percpu_irq(gt_ppi, gt_evt);
out_free:
	free_percpu(gt_evt);
out_clk:
	clk_disable_unprepare(gt_clk);
out_unmap:
	iounmap(gt_base);
	WARN(err, "ARM Global timer registration failed (%d)\n", err);

	return err;
}

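/*
 * Registered as an early DT timer: probed from nodes matching the
 * "arm,cortex-a9-global-timer" compatible string.
 */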
TIMER_OF_DECLARE(arm_gt, "arm,cortex-a9-global-timer",
		 global_timer_of_register);