/*
 * Clocksource driver for the NXP LPC32xx timer block.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>

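/*
 * Register offsets and bit definitions for the LPC32xx timer/counter
 * block; only match channel 0 (MR0) is used by this driver.
 */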
#define LPC32XX_TIMER_IR		0x000
#define LPC32XX_TIMER_IR_MR0INT		BIT(0)
#define LPC32XX_TIMER_TCR		0x004
#define LPC32XX_TIMER_TCR_CEN		BIT(0)
#define LPC32XX_TIMER_TCR_CRST		BIT(1)
#define LPC32XX_TIMER_TC		0x008
#define LPC32XX_TIMER_PR		0x00c
#define LPC32XX_TIMER_MCR		0x014
#define LPC32XX_TIMER_MCR_MR0I		BIT(0)
#define LPC32XX_TIMER_MCR_MR0R		BIT(1)
#define LPC32XX_TIMER_MCR_MR0S		BIT(2)
#define LPC32XX_TIMER_MR0		0x018
#define LPC32XX_TIMER_CTCR		0x070

struct lpc32xx_clock_event_ddata {
	struct clock_event_device evtdev;
	void __iomem *base;
	u32 ticks_per_jiffy;
};

/* Free-running counter used for sched_clock and the delay timer. */
static void __iomem *clocksource_timer_counter;

static u64 notrace lpc32xx_read_sched_clock(void)
{
	return readl(clocksource_timer_counter);
}

static unsigned long lpc32xx_delay_timer_read(void)
{
	return readl(clocksource_timer_counter);
}

static struct delay_timer lpc32xx_delay_timer = {
	.read_current_timer = lpc32xx_delay_timer_read,
};

static int lpc32xx_clkevt_next_event(unsigned long delta,
				     struct clock_event_device *evtdev)
{
	struct lpc32xx_clock_event_ddata *ddata =
		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);

	/*
	 * Place the timer in reset and program the delta into match
	 * channel 0 (MR0). When the counter reaches the MR0 value a
	 * match interrupt is raised. Finally release the timer from
	 * reset and enable it.
	 */
	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
	writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0);
	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);

	return 0;
}

static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
{
	struct lpc32xx_clock_event_ddata *ddata =
		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);

	/* Disable the timer. */
	writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);

	return 0;
}

static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
{
	struct lpc32xx_clock_event_ddata *ddata =
		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);

	/*
	 * In oneshot mode the timer is kept disabled here and only
	 * armed by the first call to set_next_event().
	 */
	writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);

	/* Enable interrupt, reset and stop on match channel 0 (MCR). */
	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
		       LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR);
	return 0;
}

static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev)
{
	struct lpc32xx_clock_event_ddata *ddata =
		container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);

	/* Enable interrupt and reset on match channel 0 (MCR). */
	writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R,
		       ddata->base + LPC32XX_TIMER_MCR);

	/*
	 * Place the timer in reset, program one jiffy worth of ticks
	 * into match channel 0 (MR0), then release the timer from
	 * reset and enable it.
	 */
	writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
	writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0);
	writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);

	return 0;
}

static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
{
	struct lpc32xx_clock_event_ddata *ddata = dev_id;

	/* Clear the pending match interrupt on channel 0 (MR0). */
	writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR);

	ddata->evtdev.event_handler(&ddata->evtdev);

	return IRQ_HANDLED;
}

static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
	.evtdev = {
		.name           = "lpc3220 clockevent",
		.features       = CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
		.rating         = 300,
		.set_next_event = lpc32xx_clkevt_next_event,
		.set_state_shutdown = lpc32xx_clkevt_shutdown,
		.set_state_oneshot = lpc32xx_clkevt_oneshot,
		.set_state_periodic = lpc32xx_clkevt_periodic,
	},
};

static int __init lpc32xx_clocksource_init(struct device_node *np)
{
	void __iomem *base;
	unsigned long rate;
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "timerclk");
	if (IS_ERR(clk)) {
		pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("clock enable failed (%d)\n", ret);
		goto err_clk_enable;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("unable to map registers\n");
		ret = -EADDRNOTAVAIL;
		goto err_iomap;
	}

	/*
	 * Disable and reset the timer, then set it up as a free running
	 * counter (CTCR) with no prescaler (PR) and no match actions (MCR).
	 * Finally release the timer from reset and enable it.
	 */
	writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR);
	writel_relaxed(0, base + LPC32XX_TIMER_PR);
	writel_relaxed(0, base + LPC32XX_TIMER_MCR);
	writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
	writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR);

	rate = clk_get_rate(clk);
	ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer",
				    rate, 300, 32, clocksource_mmio_readl_up);
	if (ret) {
		pr_err("failed to init clocksource (%d)\n", ret);
		goto err_clocksource_init;
	}

	clocksource_timer_counter = base + LPC32XX_TIMER_TC;
	lpc32xx_delay_timer.freq = rate;
	register_current_timer_delay(&lpc32xx_delay_timer);
	sched_clock_register(lpc32xx_read_sched_clock, 32, rate);

	return 0;

err_clocksource_init:
	iounmap(base);
err_iomap:
	clk_disable_unprepare(clk);
err_clk_enable:
	clk_put(clk);
	return ret;
}

static int __init lpc32xx_clockevent_init(struct device_node *np)
{
	void __iomem *base;
	unsigned long rate;
	struct clk *clk;
	int ret, irq;

	clk = of_clk_get_by_name(np, "timerclk");
	if (IS_ERR(clk)) {
		pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		pr_err("clock enable failed (%d)\n", ret);
		goto err_clk_enable;
	}

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("unable to map registers\n");
		ret = -EADDRNOTAVAIL;
		goto err_iomap;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		pr_err("get irq failed\n");
		ret = -ENOENT;
		goto err_irq;
	}

	/*
	 * Disable the timer (TCR), use plain timer mode with no
	 * prescaler (CTCR, PR) and clear any pending match interrupt
	 * on channel 0 (IR).
	 */
	writel_relaxed(0, base + LPC32XX_TIMER_TCR);
	writel_relaxed(0, base + LPC32XX_TIMER_PR);
	writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
	writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);

	rate = clk_get_rate(clk);
	lpc32xx_clk_event_ddata.base = base;
	lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
	clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
					rate, 1, -1);

	ret = request_irq(irq, lpc32xx_clock_event_handler,
			  IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent",
			  &lpc32xx_clk_event_ddata);
	if (ret) {
		pr_err("request irq failed\n");
		goto err_irq;
	}

	return 0;

err_irq:
	iounmap(base);
err_iomap:
	clk_disable_unprepare(clk);
err_clk_enable:
	clk_put(clk);
	return ret;
}

/*
 * This init routine is called once per matching timer node. The first
 * node that initializes successfully becomes the clocksource (and
 * provides sched_clock plus the delay timer); the next one becomes the
 * clock_event_device.
 */
static int __init lpc32xx_timer_init(struct device_node *np)
{
	static int has_clocksource, has_clockevent;
	int ret = 0;

	if (!has_clocksource) {
		ret = lpc32xx_clocksource_init(np);
		if (!ret) {
			has_clocksource = 1;
			return 0;
		}
	}

	if (!has_clockevent) {
		ret = lpc32xx_clockevent_init(np);
		if (!ret) {
			has_clockevent = 1;
			return 0;
		}
	}

	return ret;
}
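/*
 * Illustrative device tree node for this driver, inferred from the
 * lookups above (a "timerclk" clock, one reg range, one interrupt).
 * The unit address, interrupt specifier and clock phandle below are
 * placeholders, not taken from a real board file:
 *
 *	timer@40044000 {
 *		compatible = "nxp,lpc3220-timer";
 *		reg = <0x40044000 0x1000>;
 *		interrupts = <17 0>;
 *		clocks = <&clk 1>;
 *		clock-names = "timerclk";
 *	};
 */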
TIMER_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);