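/*
 * Driver for the Cadence Triple Timer Counter (TTC), matched via the
 * "cdns,ttc" device tree compatible string.
 */
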
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched_clock.h>
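
/*
 * The TTC provides 16/32-bit count-up timer channels that share one
 * register layout; the second channel's registers sit at base address + 4.
 * This driver uses the first channel as the clocksource (also registered
 * as sched_clock) and the second channel as the clockevent device.  Both
 * run from their input clock divided by the fixed PRESCALE value below.
 */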
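
/*
 * Register offsets for a single TTC timer channel.  The clockevent channel
 * uses the same offsets relative to base address + 4.
 */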
#define TTC_CLK_CNTRL_OFFSET		0x00
#define TTC_CNT_CNTRL_OFFSET		0x0C
#define TTC_COUNT_VAL_OFFSET		0x18
#define TTC_INTR_VAL_OFFSET		0x24
#define TTC_ISR_OFFSET			0x54
#define TTC_IER_OFFSET			0x60

#define TTC_CNT_CNTRL_DISABLE_MASK	0x1

#define TTC_CLK_CNTRL_CSRC_MASK		(1 << 5)
#define TTC_CLK_CNTRL_PSV_MASK		0x1e
#define TTC_CLK_CNTRL_PSV_SHIFT		1
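
/*
 * The counters run from their input clock divided by a fixed prescaler of
 * 2^PRESCALE_EXPONENT (= PRESCALE).  CLK_CNTRL_PRESCALE is the value
 * written to the clock control register: the exponent minus one, placed in
 * the PSV field, with the prescaler enabled via CLK_CNTRL_PRESCALE_EN.
 */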
#define PRESCALE_EXPONENT	11
#define PRESCALE		2048
#define CLK_CNTRL_PRESCALE	((PRESCALE_EXPONENT - 1) << 1)
#define CLK_CNTRL_PRESCALE_EN	1
#define CNT_CNTRL_RESET		(1 << 4)

#define MAX_F_ERR 50
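
/**
 * struct ttc_timer - per-channel timer state
 * @base_addr:	base address of the timer channel registers
 * @freq:	input clock frequency in Hz
 * @clk:	associated input clock
 * @clk_rate_change_nb:	notifier block for input clock rate changes
 */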
struct ttc_timer {
	void __iomem *base_addr;
	unsigned long freq;
	struct clk *clk;
	struct notifier_block clk_rate_change_nb;
};

#define to_ttc_timer(x) \
	container_of(x, struct ttc_timer, clk_rate_change_nb)

struct ttc_timer_clocksource {
	u32			scale_clk_ctrl_reg_old;
	u32			scale_clk_ctrl_reg_new;
	struct ttc_timer	ttc;
	struct clocksource	cs;
};

#define to_ttc_timer_clksrc(x) \
	container_of(x, struct ttc_timer_clocksource, cs)

struct ttc_timer_clockevent {
	struct ttc_timer		ttc;
	struct clock_event_device	ce;
};

#define to_ttc_timer_clkevent(x) \
	container_of(x, struct ttc_timer_clockevent, ce)

static void __iomem *ttc_sched_clock_val_reg;
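
/**
 * ttc_set_interval - Set the timer interval value
 * @timer:	pointer to the timer instance
 * @cycles:	timer interval in timer ticks
 */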
static void ttc_set_interval(struct ttc_timer *timer,
			     unsigned long cycles)
{
	u32 ctrl_reg;

	/* Disable the counter before loading the new interval value */
	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
	ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);

	writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET);

	/*
	 * Reset the counter so it restarts from 0, then clear the disable
	 * bit to start it again.
	 */
	ctrl_reg |= CNT_CNTRL_RESET;
	ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
}
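
/**
 * ttc_clock_event_interrupt - Clock event timer interrupt handler
 * @irq:	IRQ number
 * @dev_id:	pointer to the struct ttc_timer_clockevent instance
 *
 * Returns: Always IRQ_HANDLED
 */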
static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id)
{
	struct ttc_timer_clockevent *ttce = dev_id;
	struct ttc_timer *timer = &ttce->ttc;

	/* Acknowledge the interrupt and call the event handler */
	readl_relaxed(timer->base_addr + TTC_ISR_OFFSET);

	ttce->ce.event_handler(&ttce->ce);

	return IRQ_HANDLED;
}
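
/**
 * __ttc_clocksource_read - Read the current counter value
 * @cs:	pointer to the registered clocksource
 *
 * Returns: current counter value of the clocksource channel
 */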
static u64 __ttc_clocksource_read(struct clocksource *cs)
{
	struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc;

	return (u64)readl_relaxed(timer->base_addr +
				  TTC_COUNT_VAL_OFFSET);
}

static u64 notrace ttc_sched_clock_read(void)
{
	return readl_relaxed(ttc_sched_clock_val_reg);
}
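
/**
 * ttc_set_next_event - Program the next clockevent expiry
 * @cycles:	timer interval in timer ticks
 * @evt:	pointer to the clock event device
 *
 * Returns: Always 0
 */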
static int ttc_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
{
	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
	struct ttc_timer *timer = &ttce->ttc;

	ttc_set_interval(timer, cycles);
	return 0;
}
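
/*
 * clock_event_device state callbacks: ttc_shutdown() stops the counter and
 * also serves as the one-shot state handler (ttc_set_next_event() restarts
 * the counter), ttc_set_periodic() programs one tick period, and
 * ttc_resume() re-enables the counter on tick resume.
 */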
static int ttc_shutdown(struct clock_event_device *evt)
{
	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
	struct ttc_timer *timer = &ttce->ttc;
	u32 ctrl_reg;

	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
	ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
	return 0;
}

static int ttc_set_periodic(struct clock_event_device *evt)
{
	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
	struct ttc_timer *timer = &ttce->ttc;

	ttc_set_interval(timer,
			 DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ));
	return 0;
}

static int ttc_resume(struct clock_event_device *evt)
{
	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
	struct ttc_timer *timer = &ttce->ttc;
	u32 ctrl_reg;

	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
	ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
	return 0;
}
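
/*
 * Clock rate change notifier for the clocksource channel.  If the input
 * clock rate changes by an exact power-of-two factor (within MAX_F_ERR Hz),
 * the prescaler is adjusted in the opposite direction so that the effective
 * clocksource frequency stays constant: on a rate increase the new divider
 * is written before the change (PRE_RATE_CHANGE), on a rate decrease after
 * it (POST_RATE_CHANGE), and ABORT_RATE_CHANGE restores the saved value.
 */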
static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct ttc_timer *ttc = to_ttc_timer(nb);
	struct ttc_timer_clocksource *ttccs = container_of(ttc,
			struct ttc_timer_clocksource, ttc);

	switch (event) {
	case PRE_RATE_CHANGE:
	{
		u32 psv;
		unsigned long factor, rate_low, rate_high;

		if (ndata->new_rate > ndata->old_rate) {
			factor = DIV_ROUND_CLOSEST(ndata->new_rate,
						   ndata->old_rate);
			rate_low = ndata->old_rate;
			rate_high = ndata->new_rate;
		} else {
			factor = DIV_ROUND_CLOSEST(ndata->old_rate,
						   ndata->new_rate);
			rate_low = ndata->new_rate;
			rate_high = ndata->old_rate;
		}

		if (!is_power_of_2(factor))
			return NOTIFY_BAD;

		if (abs(rate_high - (factor * rate_low)) > MAX_F_ERR)
			return NOTIFY_BAD;

		factor = __ilog2_u32(factor);

		/*
		 * Save the current clock control register value so it can
		 * be restored if the rate change is aborted.
		 */
		ttccs->scale_clk_ctrl_reg_old =
			readl_relaxed(ttccs->ttc.base_addr +
				      TTC_CLK_CNTRL_OFFSET);

		psv = (ttccs->scale_clk_ctrl_reg_old &
		       TTC_CLK_CNTRL_PSV_MASK) >>
		       TTC_CLK_CNTRL_PSV_SHIFT;
		if (ndata->new_rate < ndata->old_rate)
			psv -= factor;
		else
			psv += factor;

		/* prescaler within legal range? */
		if (psv & ~(TTC_CLK_CNTRL_PSV_MASK >> TTC_CLK_CNTRL_PSV_SHIFT))
			return NOTIFY_BAD;

		ttccs->scale_clk_ctrl_reg_new = ttccs->scale_clk_ctrl_reg_old &
			~TTC_CLK_CNTRL_PSV_MASK;
		ttccs->scale_clk_ctrl_reg_new |= psv << TTC_CLK_CNTRL_PSV_SHIFT;

		/* scale down: adjust divider in post-change notification */
		if (ndata->new_rate < ndata->old_rate)
			return NOTIFY_DONE;

		/* scale up: adjust divider now - before frequency change */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		break;
	}
	case POST_RATE_CHANGE:
		/* scale up: pre-change notification did the adjustment */
		if (ndata->new_rate > ndata->old_rate)
			return NOTIFY_OK;

		/* scale down: adjust divider now - after frequency change */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_new,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		break;

	case ABORT_RATE_CHANGE:
		/* undo the adjustment only if it was applied in PRE */
		if (ndata->new_rate < ndata->old_rate)
			return NOTIFY_OK;

		/* restore the original register value */
		writel_relaxed(ttccs->scale_clk_ctrl_reg_old,
			       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
		/* fall through */
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}
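
/**
 * ttc_setup_clocksource - Set up a timer channel as the clocksource
 * @clk:	pointer to the channel's input clock
 * @base:	base address of the timer channel
 * @timer_width: counter width in bits (16 or 32)
 *
 * Returns: 0 on success, a negative errno otherwise.
 */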
static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
					u32 timer_width)
{
	struct ttc_timer_clocksource *ttccs;
	int err;

	ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
	if (!ttccs)
		return -ENOMEM;

	ttccs->ttc.clk = clk;

	err = clk_prepare_enable(ttccs->ttc.clk);
	if (err) {
		kfree(ttccs);
		return err;
	}

	ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);

	ttccs->ttc.clk_rate_change_nb.notifier_call =
		ttc_rate_change_clocksource_cb;
	ttccs->ttc.clk_rate_change_nb.next = NULL;

	err = clk_notifier_register(ttccs->ttc.clk,
				    &ttccs->ttc.clk_rate_change_nb);
	if (err)
		pr_warn("Unable to register clock notifier.\n");

	ttccs->ttc.base_addr = base;
	ttccs->cs.name = "ttc_clocksource";
	ttccs->cs.rating = 200;
	ttccs->cs.read = __ttc_clocksource_read;
	ttccs->cs.mask = CLOCKSOURCE_MASK(timer_width);
	ttccs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;

	/*
	 * Set up the clocksource channel as a free-running counter: all
	 * interrupts disabled, prescaler enabled, counter reset and started.
	 */
	writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET);
	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
		       ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
	writel_relaxed(CNT_CNTRL_RESET,
		       ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);

	err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
	if (err) {
		kfree(ttccs);
		return err;
	}

	/* Also use this counter as the raw sched_clock source. */
	ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
	sched_clock_register(ttc_sched_clock_read, timer_width,
			     ttccs->ttc.freq / PRESCALE);

	return 0;
}
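
/*
 * Clock rate change notifier for the clockevent channel: once the new rate
 * is in effect, cache it and reprogram the clockevent frequency to match.
 */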
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct ttc_timer *ttc = to_ttc_timer(nb);
	struct ttc_timer_clockevent *ttcce = container_of(ttc,
			struct ttc_timer_clockevent, ttc);

	switch (event) {
	case POST_RATE_CHANGE:
		/* update cached frequency */
		ttc->freq = ndata->new_rate;

		clockevents_update_freq(&ttcce->ce, ndata->new_rate / PRESCALE);

		/* fall through */
	case PRE_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
	default:
		return NOTIFY_DONE;
	}
}
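
/**
 * ttc_setup_clockevent - Set up a timer channel as the clockevent device
 * @clk:	pointer to the channel's input clock
 * @base:	base address of the timer channel
 * @irq:	interrupt number of the timer channel
 *
 * Returns: 0 on success, a negative errno otherwise.
 */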
static int __init ttc_setup_clockevent(struct clk *clk,
				       void __iomem *base, u32 irq)
{
	struct ttc_timer_clockevent *ttcce;
	int err;

	ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
	if (!ttcce)
		return -ENOMEM;

	ttcce->ttc.clk = clk;

	err = clk_prepare_enable(ttcce->ttc.clk);
	if (err) {
		kfree(ttcce);
		return err;
	}

	ttcce->ttc.clk_rate_change_nb.notifier_call =
		ttc_rate_change_clockevent_cb;
	ttcce->ttc.clk_rate_change_nb.next = NULL;

	err = clk_notifier_register(ttcce->ttc.clk,
				    &ttcce->ttc.clk_rate_change_nb);
	if (err) {
		pr_warn("Unable to register clock notifier.\n");
		kfree(ttcce);
		return err;
	}

	ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);

	ttcce->ttc.base_addr = base;
	ttcce->ce.name = "ttc_clockevent";
	ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	ttcce->ce.set_next_event = ttc_set_next_event;
	ttcce->ce.set_state_shutdown = ttc_shutdown;
	ttcce->ce.set_state_periodic = ttc_set_periodic;
	ttcce->ce.set_state_oneshot = ttc_shutdown;
	ttcce->ce.tick_resume = ttc_resume;
	ttcce->ce.rating = 200;
	ttcce->ce.irq = irq;
	ttcce->ce.cpumask = cpu_possible_mask;

	/*
	 * Program the counter control and clock control registers for
	 * clockevent use: counter left disabled for now (0x23 includes the
	 * disable bit), prescaler enabled, timer interrupt enabled.
	 */
	writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
	writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN,
		       ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET);
	writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET);

	err = request_irq(irq, ttc_clock_event_interrupt,
			  IRQF_TIMER, ttcce->ce.name, ttcce);
	if (err) {
		kfree(ttcce);
		return err;
	}

	clockevents_config_and_register(&ttcce->ce,
			ttcce->ttc.freq / PRESCALE, 1, 0xfffe);

	return 0;
}
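
/**
 * ttc_timer_init - Initialize the TTC timers
 * @timer:	TTC device tree node
 *
 * Maps the hardware, resolves the input clocks and registers the
 * clocksource and clockevent devices.
 *
 * Returns: 0 on success, a negative errno otherwise.
 */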
static int __init ttc_timer_init(struct device_node *timer)
{
	unsigned int irq;
	void __iomem *timer_baseaddr;
	struct clk *clk_cs, *clk_ce;
	static int initialized;
	int clksel, ret;
	u32 timer_width = 16;

	/* Only the first TTC block found is used. */
	if (initialized)
		return 0;

	initialized = 1;

	/*
	 * Map the TTC block.  The clockevent runs on the second timer
	 * channel, hence index 1 when looking up the interrupt.
	 */
	timer_baseaddr = of_iomap(timer, 0);
	if (!timer_baseaddr) {
		pr_err("ERROR: invalid timer base address\n");
		return -ENXIO;
	}

	irq = irq_of_parse_and_map(timer, 1);
	if (!irq) {
		pr_err("ERROR: invalid interrupt number\n");
		return -EINVAL;
	}

	of_property_read_u32(timer, "timer-width", &timer_width);

	/*
	 * Pick each channel's input clock according to the clock source
	 * bit in its clock control register.
	 */
	clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET);
	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
	clk_cs = of_clk_get(timer, clksel);
	if (IS_ERR(clk_cs)) {
		pr_err("ERROR: timer input clock not found\n");
		return PTR_ERR(clk_cs);
	}

	clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
	clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK);
	clk_ce = of_clk_get(timer, clksel);
	if (IS_ERR(clk_ce)) {
		pr_err("ERROR: timer input clock not found\n");
		return PTR_ERR(clk_ce);
	}

	ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
	if (ret)
		return ret;

	ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
	if (ret)
		return ret;

	pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);

	return 0;
}

TIMER_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);