1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32#include <linux/interrupt.h>
33#include <linux/clk.h>
34#include <linux/clk-provider.h>
35#include <linux/clocksource.h>
36#include <linux/clockchips.h>
37#include <linux/cpu.h>
38#include <linux/of.h>
39#include <linux/of_irq.h>
40#include <asm/irq.h>
41#include <asm/arcregs.h>
42
43#include <asm/mcip.h>
44
45
/* Timer related Aux registers (aux-reg addresses, accessed via read/write_aux_reg) */
#define ARC_REG_TIMER0_LIMIT 0x23	/* timer 0 limit (reload) value */
#define ARC_REG_TIMER0_CTRL 0x22	/* timer 0 control */
#define ARC_REG_TIMER0_CNT 0x21	/* timer 0 current count */
#define ARC_REG_TIMER1_LIMIT 0x102	/* timer 1 limit value */
#define ARC_REG_TIMER1_CTRL 0x101	/* timer 1 control */
#define ARC_REG_TIMER1_CNT 0x100	/* timer 1 current count */

/* CTRL register bits (names per convention: IE = Interrupt Enable,
 * NH presumably = count only when core Not Halted — confirm vs ARC PRM) */
#define TIMER_CTRL_IE (1 << 0)
#define TIMER_CTRL_NH (1 << 1)

#define ARC_TIMER_MAX 0xFFFFFFFF	/* timers are 32-bit wide */

/* Rate (Hz) of the timer input clock, set once by arc_get_timer_clk() */
static unsigned long arc_timer_freq;
59
60static int noinline arc_get_timer_clk(struct device_node *node)
61{
62 struct clk *clk;
63 int ret;
64
65 clk = of_clk_get(node, 0);
66 if (IS_ERR(clk)) {
67 pr_err("timer missing clk");
68 return PTR_ERR(clk);
69 }
70
71 ret = clk_prepare_enable(clk);
72 if (ret) {
73 pr_err("Couldn't enable parent clk\n");
74 return ret;
75 }
76
77 arc_timer_freq = clk_get_rate(clk);
78
79 return 0;
80}
81
82
83
84#ifdef CONFIG_ARC_HAS_GFRC
85
86static cycle_t arc_read_gfrc(struct clocksource *cs)
87{
88 unsigned long flags;
89 union {
90#ifdef CONFIG_CPU_BIG_ENDIAN
91 struct { u32 h, l; };
92#else
93 struct { u32 l, h; };
94#endif
95 cycle_t full;
96 } stamp;
97
98 local_irq_save(flags);
99
100 __mcip_cmd(CMD_GFRC_READ_LO, 0);
101 stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
102
103 __mcip_cmd(CMD_GFRC_READ_HI, 0);
104 stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
105
106 local_irq_restore(flags);
107
108 return stamp.full;
109}
110
/* Clocksource backed by the 64-bit ARConnect Global Free-Running Counter */
static struct clocksource arc_counter_gfrc = {
	.name = "ARConnect GFRC",
	.rating = 400,	/* preferred over RTC (350) and Timer1 (300) below */
	.read = arc_read_gfrc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
118
119static int __init arc_cs_setup_gfrc(struct device_node *node)
120{
121 int exists = cpuinfo_arc700[0].extn.gfrc;
122 int ret;
123
124 if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
125 return -ENXIO;
126
127 ret = arc_get_timer_clk(node);
128 if (ret)
129 return ret;
130
131 return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
132}
133CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
134
135#endif
136
137#ifdef CONFIG_ARC_HAS_RTC
138
/* ARCv2 RTC: local 64-bit free-running counter, aux-reg addresses */
#define AUX_RTC_CTRL 0x103	/* control/status (bit 31 used by read loop below) */
#define AUX_RTC_LOW 0x104	/* low 32 bits of count */
#define AUX_RTC_HIGH 0x105	/* high 32 bits of count */
142
/*
 * Sample the 64-bit RTC counter.
 *
 * The two 32-bit halves cannot be read atomically, so the sequence
 * re-reads until AUX_RTC_CTRL bit 31 is set, indicating the LOW/HIGH
 * pair forms a coherent snapshot.
 * NOTE(review): exact semantics of CTRL bit 31 assumed from this loop's
 * structure — confirm against the ARC PRM.
 */
static cycle_t arc_read_rtc(struct clocksource *cs)
{
	unsigned long status;
	union {
		/* overlay two u32 halves on a cycle_t, ordered per endianness */
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t full;
	} stamp;

	/* Loop: read LOW, HIGH, then CTRL; retry while bit 31 is clear */
	__asm__ __volatile(
	"1: \n"
	" lr %0, [AUX_RTC_LOW] \n"
	" lr %1, [AUX_RTC_HIGH] \n"
	" lr %2, [AUX_RTC_CTRL] \n"
	" bbit0.nt %2, 31, 1b \n"
	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));

	return stamp.full;
}
166
/* Clocksource backed by the per-core ARCv2 RTC (UP only, see setup below) */
static struct clocksource arc_counter_rtc = {
	.name = "ARCv2 RTC",
	.rating = 350,	/* below GFRC (400), above Timer1 (300) */
	.read = arc_read_rtc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
174
175static int __init arc_cs_setup_rtc(struct device_node *node)
176{
177 int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
178 int ret;
179
180 if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
181 return -ENXIO;
182
183
184 if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
185 return -EINVAL;
186
187 ret = arc_get_timer_clk(node);
188 if (ret)
189 return ret;
190
191 write_aux_reg(AUX_RTC_CTRL, 1);
192
193 return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
194}
195CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
196
197#endif
198
199
200
201
202
203static cycle_t arc_read_timer1(struct clocksource *cs)
204{
205 return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
206}
207
/* Fallback clocksource: core Timer1 free-running (32-bit, lowest rating) */
static struct clocksource arc_counter_timer1 = {
	.name = "ARC Timer1",
	.rating = 300,
	.read = arc_read_timer1,
	.mask = CLOCKSOURCE_MASK(32),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
215
216static int __init arc_cs_setup_timer1(struct device_node *node)
217{
218 int ret;
219
220
221 if (IS_ENABLED(CONFIG_SMP))
222 return -EINVAL;
223
224 ret = arc_get_timer_clk(node);
225 if (ret)
226 return ret;
227
228 write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
229 write_aux_reg(ARC_REG_TIMER1_CNT, 0);
230 write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
231
232 return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
233}
234
235
236
237static int arc_timer_irq;
238
239
240
241
242
/*
 * Arm Timer0 to interrupt after @cycles ticks.
 * Order matters: program the limit, zero the count, then enable the
 * timer (IE = interrupt on limit, NH = don't count while halted).
 * @cycles fits in 32 bits: the clockevent is registered with
 * max_delta = ARC_TIMER_MAX.
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start counting from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
250
251
/* clockevents one-shot hook: fire an interrupt @delta cycles from now */
static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}
258
/* clockevents periodic hook: program the tick period */
static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At arc_timer_freq cycles per second, one tick (1/HZ s) is
	 * arc_timer_freq / HZ cycles.
	 */
	arc_timer_event_setup(arc_timer_freq / HZ);
	return 0;
}
268
/*
 * Per-cpu clockevent device backed by each core's private Timer0.
 * Oneshot mode needs no set_state hook: it is driven entirely through
 * set_next_event (the irq handler leaves IE clear when not periodic).
 */
static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name = "ARC Timer0",
	.features = CLOCK_EVT_FEAT_ONESHOT |
		    CLOCK_EVT_FEAT_PERIODIC,
	.rating = 300,
	.set_next_event = arc_clkevent_set_next_event,
	.set_state_periodic = arc_clkevent_set_periodic,
};
277
/*
 * Timer0 tick interrupt handler.
 * Runs per-cpu (the irq is requested with request_percpu_irq, with the
 * cpu's clockevent device as dev_id).
 */
static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Periodic mode: keep IE set so the timer interrupts again next
	 * tick.  Oneshot mode: leave IE clear until set_next_event()
	 * re-arms.  clockevent_state_periodic() returns 0/1, which lines
	 * up exactly with TIMER_CTRL_IE (bit 0).
	 * NOTE(review): this CTRL write presumably also acks the pending
	 * timer interrupt — confirm against the ARC PRM.
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
298
299
300static int arc_timer_starting_cpu(unsigned int cpu)
301{
302 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
303
304 evt->cpumask = cpumask_of(smp_processor_id());
305
306 clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
307 enable_percpu_irq(arc_timer_irq, 0);
308 return 0;
309}
310
/* CPU hotplug teardown counterpart: mask the timer irq on this cpu */
static int arc_timer_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(arc_timer_irq);
	return 0;
}
316
317
318
319
320static int __init arc_clockevent_setup(struct device_node *node)
321{
322 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
323 int ret;
324
325 arc_timer_irq = irq_of_parse_and_map(node, 0);
326 if (arc_timer_irq <= 0) {
327 pr_err("clockevent: missing irq");
328 return -EINVAL;
329 }
330
331 ret = arc_get_timer_clk(node);
332 if (ret) {
333 pr_err("clockevent: missing clk");
334 return ret;
335 }
336
337
338 ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
339 "Timer0 (per-cpu-tick)", evt);
340 if (ret) {
341 pr_err("clockevent: unable to request irq\n");
342 return ret;
343 }
344
345 ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
346 "AP_ARC_TIMER_STARTING",
347 arc_timer_starting_cpu,
348 arc_timer_dying_cpu);
349 if (ret) {
350 pr_err("Failed to setup hotplug state");
351 return ret;
352 }
353 return 0;
354}
355
356static int __init arc_of_timer_init(struct device_node *np)
357{
358 static int init_count = 0;
359 int ret;
360
361 if (!init_count) {
362 init_count = 1;
363 ret = arc_clockevent_setup(np);
364 } else {
365 ret = arc_cs_setup_timer1(np);
366 }
367
368 return ret;
369}
370CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
371
372
373
374
/*
 * Arch hook for time subsystem init: register DT-described clocks,
 * then run every CLOCKSOURCE_OF_DECLARE probe declared above.
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	clocksource_probe();
}
380