1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/clk.h>
19#include <linux/interrupt.h>
20#include <linux/clockchips.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/slab.h>
24#include <linux/sched_clock.h>
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/* TTC register offsets (per-counter; counters are spaced 4 bytes apart) */
#define TTC_CLK_CNTRL_OFFSET 0x00	/* clock control: prescaler, source */
#define TTC_CNT_CNTRL_OFFSET 0x0C	/* counter control: enable/reset/mode */
#define TTC_COUNT_VAL_OFFSET 0x18	/* current counter value (read-only) */
#define TTC_INTR_VAL_OFFSET 0x24	/* interval value for interval mode */
#define TTC_ISR_OFFSET 0x54		/* interrupt status; cleared on read */
#define TTC_IER_OFFSET 0x60		/* interrupt enable */

/* Setting this bit in the counter control register stops the counter */
#define TTC_CNT_CNTRL_DISABLE_MASK 0x1

/* Clock control register fields */
#define TTC_CLK_CNTRL_CSRC_MASK (1 << 5)	/* clock source select */
#define TTC_CLK_CNTRL_PSV_MASK 0x1e		/* prescale value field */
#define TTC_CLK_CNTRL_PSV_SHIFT 1		/* prescale field shift */

/*
 * The timer input clock is divided by 2^PRESCALE_EXPONENT; the PSV field
 * encodes (exponent - 1), so CLK_CNTRL_PRESCALE programs a divide-by-2048.
 */
#define PRESCALE_EXPONENT 11
#define PRESCALE 2048			/* 2^PRESCALE_EXPONENT */
#define CLK_CNTRL_PRESCALE ((PRESCALE_EXPONENT - 1) << 1)
#define CLK_CNTRL_PRESCALE_EN 1		/* enable the prescaler */
#define CNT_CNTRL_RESET (1 << 4)	/* restart counter from zero */

/* Max tolerated rate mismatch (Hz) when adjusting the prescaler */
#define MAX_F_ERR 50
69
70
71
72
73
74
75
76
77
/**
 * struct ttc_timer - state for one TTC counter
 * @base_addr: I/O-mapped base of this counter's register block
 * @freq: input clock frequency in Hz (updated on clock rate changes)
 * @clk: input clock feeding this counter
 * @clk_rate_change_nb: notifier invoked when @clk changes rate
 */
struct ttc_timer {
	void __iomem *base_addr;
	unsigned long freq;
	struct clk *clk;
	struct notifier_block clk_rate_change_nb;
};

/* Recover the enclosing ttc_timer from its embedded notifier_block. */
#define to_ttc_timer(x) \
		container_of(x, struct ttc_timer, clk_rate_change_nb)
87
/**
 * struct ttc_timer_clocksource - TTC counter used as a clocksource
 * @scale_clk_ctrl_reg_old: CLK_CNTRL value saved before a rate change
 * @scale_clk_ctrl_reg_new: CLK_CNTRL value computed for the new rate
 * @ttc: underlying TTC counter state
 * @cs: the registered clocksource
 *
 * The two saved register values let the rate-change notifier apply the
 * new prescaler at the right moment (PRE or POST) and roll back on ABORT.
 */
struct ttc_timer_clocksource {
	u32 scale_clk_ctrl_reg_old;
	u32 scale_clk_ctrl_reg_new;
	struct ttc_timer ttc;
	struct clocksource cs;
};

/* Recover the enclosing ttc_timer_clocksource from its clocksource. */
#define to_ttc_timer_clksrc(x) \
		container_of(x, struct ttc_timer_clocksource, cs)
97
/**
 * struct ttc_timer_clockevent - TTC counter used as a clockevent device
 * @ttc: underlying TTC counter state
 * @ce: the registered clock_event_device
 */
struct ttc_timer_clockevent {
	struct ttc_timer ttc;
	struct clock_event_device ce;
};

/* Recover the enclosing ttc_timer_clockevent from its clock_event_device. */
#define to_ttc_timer_clkevent(x) \
		container_of(x, struct ttc_timer_clockevent, ce)

/* COUNT_VAL register of the clocksource counter, read by sched_clock */
static void __iomem *ttc_sched_clock_val_reg;
107
108
109
110
111
112
113
114static void ttc_set_interval(struct ttc_timer *timer,
115 unsigned long cycles)
116{
117 u32 ctrl_reg;
118
119
120 ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
121 ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
122 writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
123
124 writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);
125
126
127
128
129
130 ctrl_reg |= CNT_CNTRL_RESET;
131 ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
132 writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
133}
134
135
136
137
138
139
140
141
142
143static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
144{
145 struct ttc_timer_clockevent *ttce = dev_id;
146 struct ttc_timer *timer = &ttce->ttc;
147
148
149 readl_relaxed(timer->base_addr + TTC_ISR_OFFSET);
150
151 ttce->ce.event_handler(&ttce->ce);
152
153 return IRQ_HANDLED;
154}
155
156
157
158
159
160
161static cycle_t __ttc_clocksource_read(struct clocksource *cs)
162{
163 struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;
164
165 return (cycle_t)readl_relaxed(timer->base_addr +
166 TTC_COUNT_VAL_OFFSET);
167}
168
169static u64 notrace ttc_sched_clock_read(void)
170{
171 return readl_relaxed(ttc_sched_clock_val_reg);
172}
173
174
175
176
177
178
179
180
181
182static int ttc_set_next_event(unsigned long cycles,
183 struct clock_event_device *evt)
184{
185 struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
186 struct ttc_timer *timer = &ttce->ttc;
187
188 ttc_set_interval(timer, cycles);
189 return 0;
190}
191
192
193
194
195
196
197static int ttc_shutdown(struct clock_event_device *evt)
198{
199 struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
200 struct ttc_timer *timer = &ttce->ttc;
201 u32 ctrl_reg;
202
203 ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
204 ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
205 writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
206 return 0;
207}
208
209static int ttc_set_periodic(struct clock_event_device *evt)
210{
211 struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
212 struct ttc_timer *timer = &ttce->ttc;
213
214 ttc_set_interval(timer,
215 DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ));
216 return 0;
217}
218
219static int ttc_resume(struct clock_event_device *evt)
220{
221 struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
222 struct ttc_timer *timer = &ttce->ttc;
223 u32 ctrl_reg;
224
225 ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
226 ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
227 writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
228 return 0;
229}
230
/**
 * ttc_rate_change_clocksource_cb - keep the clocksource rate stable across
 * input clock rate changes
 * @nb: notifier block embedded in the ttc_timer
 * @event: PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE
 * @data: struct clk_notifier_data with old_rate/new_rate
 *
 * The clocksource frequency registered with the kernel cannot change, so a
 * power-of-two change of the input clock is compensated by adjusting the
 * TTC prescaler in the opposite direction. Non-power-of-two changes (beyond
 * MAX_F_ERR tolerance) are vetoed with NOTIFY_BAD.
 *
 * To keep the effective rate from ever overshooting, the prescaler is
 * increased in PRE (before a rate increase) but decreased only in POST
 * (after a rate decrease); ABORT rolls back a PRE-applied value.
 */
static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct ttc_timer *ttc = to_ttc_timer(nb);
	struct ttc_timer_clocksource *ttccs = container_of(ttc,
			struct ttc_timer_clocksource, ttc);

	switch (event) {
	case PRE_RATE_CHANGE:
	{
		u32 psv;
		unsigned long factor, rate_low, rate_high;

		/* Compute the ratio between old and new rate, either way. */
		if (ndata->new_rate > ndata->old_rate) {
			factor = DIV_ROUND_CLOSEST(ndata->new_rate,
					ndata->old_rate);
			rate_low = ndata->old_rate;
			rate_high = ndata->new_rate;
		} else {
			factor = DIV_ROUND_CLOSEST(ndata->old_rate,
					ndata->new_rate);
			rate_low = ndata->new_rate;
			rate_high = ndata->old_rate;
		}

		/* Only power-of-two ratios map onto the prescaler field. */
		if (!is_power_of_2(factor))
			return NOTIFY_BAD;

		/* Reject ratios that are only approximately a power of two. */
		if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
			return NOTIFY_BAD;

		/* The prescaler field holds the exponent, not the divisor. */
		factor = __ilog2_u32(factor);

		/*
		 * Save the current clock control register so ABORT can
		 * restore it, then derive the new prescale value from it.
		 */
		ttccs->scale_clk_ctrl_reg_old =
			readl_relaxed(ttccs->ttc.base_addr +
			TTC_CLK_CNTRL_OFFSET);

		psv = (ttccs->scale_clk_ctrl_reg_old &
				TTC_CLK_CNTRL_PSV_MASK) >>
				TTC_CLK_CNTRL_PSV_SHIFT;
		if (ndata->new_rate < ndata->old_rate)
			psv -= factor;
		else
			psv += factor;

		/* Veto if the adjusted exponent overflows the PSV field. */
		if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
			return NOTIFY_BAD;

		ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
			~TTC_CLK_CNTRL_PSV_MASK;
		ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;

		/* For a rate decrease, apply the new prescaler in POST
		 * instead, so the effective rate never undershoots. */
		if (ndata->new_rate < ndata->old_rate)
			return NOTIFY_DONE;

		/* Rate increase: scale down now, before the clock speeds up. */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		break;
	}
	case POST_RATE_CHANGE:
		/* Rate increase was already handled in PRE. */
		if (ndata->new_rate > ndata->old_rate)
			return NOTIFY_OK;

		/* Rate decrease: scale up now that the clock has slowed. */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		break;

	case ABORT_RATE_CHANGE:
		/* Nothing was written in PRE for a rate decrease. */
		if (ndata->new_rate < ndata->old_rate)
			return NOTIFY_OK;

		/* Undo the prescaler written during PRE_RATE_CHANGE. */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		/* fall through */
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}
324
325static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
326 u32 timer_width)
327{
328 struct ttc_timer_clocksource *ttccs;
329 int err;
330
331 ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
332 if (!ttccs)
333 return -ENOMEM;
334
335 ttccs->ttc.clk = clk;
336
337 err = clk_prepare_enable(ttccs->ttc.clk);
338 if (err) {
339 kfree(ttccs);
340 return err;
341 }
342
343 ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
344
345 ttccs->ttc.clk_rate_change_nb.notifier_call =
346 ttc_rate_change_clocksource_cb;
347 ttccs->ttc.clk_rate_change_nb.next = NULL;
348
349 err = clk_notifier_register(ttccs->ttc.clk,
350 &ttccs->ttc.clk_rate_change_nb);
351 if (err)
352 pr_warn("Unable to register clock notifier.\n");
353
354 ttccs->ttc.base_addr = base;
355 ttccs->cs.name = "ttc_clocksource";
356 ttccs->cs.rating = 200;
357 ttccs->cs.read = __ttc_clocksource_read;
358 ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width);
359 ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
360
361
362
363
364
365
366 writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
367 writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
368 ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
369 writel_relaxed(CNT_CNTRL_RESET,
370 ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
371
372 err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
373 if (err) {
374 kfree(ttccs);
375 return err;
376 }
377
378 ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
379 sched_clock_register(ttc_sched_clock_read, timer_width,
380 ttccs->ttc.freq / PRESCALE);
381
382 return 0;
383}
384
/**
 * ttc_rate_change_clockevent_cb - track input clock rate for the clockevent
 * @nb: notifier block embedded in the ttc_timer
 * @event: PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE
 * @data: struct clk_notifier_data with old_rate/new_rate
 *
 * Unlike the clocksource, the clockevent can simply be re-registered with
 * a new frequency, so only POST_RATE_CHANGE needs handling.
 */
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct ttc_timer *ttc = to_ttc_timer(nb);
	struct ttc_timer_clockevent *ttcce = container_of(ttc,
			struct ttc_timer_clockevent, ttc);

	switch (event) {
	case POST_RATE_CHANGE:
		/* The rate change is final; adopt the new frequency. */
		ttc->freq = ndata->new_rate;

		clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);

		/* fall through */
	case PRE_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}
407
408static int __init ttc_setup_clockevent(struct clk *clk,
409 void __iomem *base, u32 irq)
410{
411 struct ttc_timer_clockevent *ttcce;
412 int err;
413
414 ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
415 if (!ttcce)
416 return -ENOMEM;
417
418 ttcce->ttc.clk = clk;
419
420 err = clk_prepare_enable(ttcce->ttc.clk);
421 if (err) {
422 kfree(ttcce);
423 return err;
424 }
425
426 ttcce->ttc.clk_rate_change_nb.notifier_call =
427 ttc_rate_change_clockevent_cb;
428 ttcce->ttc.clk_rate_change_nb.next = NULL;
429
430 err = clk_notifier_register(ttcce->ttc.clk,
431 &ttcce->ttc.clk_rate_change_nb);
432 if (err) {
433 pr_warn("Unable to register clock notifier.\n");
434 return err;
435 }
436
437 ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
438
439 ttcce->ttc.base_addr = base;
440 ttcce->ce.name = "ttc_clockevent";
441 ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
442 ttcce->ce.set_next_event = ttc_set_next_event;
443 ttcce->ce.set_state_shutdown = ttc_shutdown;
444 ttcce->ce.set_state_periodic = ttc_set_periodic;
445 ttcce->ce.set_state_oneshot = ttc_shutdown;
446 ttcce->ce.tick_resume = ttc_resume;
447 ttcce->ce.rating = 200;
448 ttcce->ce.irq = irq;
449 ttcce->ce.cpumask = cpu_possible_mask;
450
451
452
453
454
455
456 writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
457 writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
458 ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
459 writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);
460
461 err = request_irq(irq, ttc_clock_event_interrupt,
462 IRQF_TIMER, ttcce->ce.name, ttcce);
463 if (err) {
464 kfree(ttcce);
465 return err;
466 }
467
468 clockevents_config_and_register(&ttcce->ce,
469 ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
470
471 return 0;
472}
473
474
475
476
477
478
479
480static int __init ttc_timer_init(struct device_node *timer)
481{
482 unsigned int irq;
483 void __iomem *timer_baseaddr;
484 struct clk *clk_cs, *clk_ce;
485 static int initialized;
486 int clksel, ret;
487 u32 timer_width = 16;
488
489 if (initialized)
490 return 0;
491
492 initialized = 1;
493
494
495
496
497
498
499 timer_baseaddr = of_iomap(timer, 0);
500 if (!timer_baseaddr) {
501 pr_err("ERROR: invalid timer base address\n");
502 return -ENXIO;
503 }
504
505 irq = irq_of_parse_and_map(timer, 1);
506 if (irq <= 0) {
507 pr_err("ERROR: invalid interrupt number\n");
508 return -EINVAL;
509 }
510
511 of_property_read_u32(timer, "timer-width", &timer_width);
512
513 clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
514 clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
515 clk_cs = of_clk_get(timer, clksel);
516 if (IS_ERR(clk_cs)) {
517 pr_err("ERROR: timer input clock not found\n");
518 return PTR_ERR(clk_cs);
519 }
520
521 clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
522 clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
523 clk_ce = of_clk_get(timer, clksel);
524 if (IS_ERR(clk_ce)) {
525 pr_err("ERROR: timer input clock not found\n");
526 return PTR_ERR(clk_ce);
527 }
528
529 ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
530 if (ret)
531 return ret;
532
533 ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
534 if (ret)
535 return ret;
536
537 pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
538
539 return 0;
540}
541
/* Hook ttc_timer_init to device tree nodes compatible with "cdns,ttc". */
CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
543