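// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/clocksource/arm_arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 */
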
#define pr_fmt(fmt)	"arm_arch_timer: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched_clock.h>
#include <linux/acpi.h>

#include <asm/arch_timer.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#undef pr_fmt
#define pr_fmt(fmt)	"arch_timer: " fmt

#define CNTTIDR		0x08
#define CNTTIDR_VIRT(n)	(BIT(1) << ((n) * 4))

#define CNTACR(n)	(0x40 + ((n) * 4))
#define CNTACR_RPCT	BIT(0)
#define CNTACR_RVCT	BIT(1)
#define CNTACR_RFRQ	BIT(2)
#define CNTACR_RVOFF	BIT(3)
#define CNTACR_RWVT	BIT(4)
#define CNTACR_RWPT	BIT(5)

#define CNTVCT_LO	0x08
#define CNTVCT_HI	0x0c
#define CNTFRQ		0x10
#define CNTP_TVAL	0x28
#define CNTP_CTL	0x2c
#define CNTV_TVAL	0x38
#define CNTV_CTL	0x3c

static unsigned arch_timers_present __initdata;

static void __iomem *arch_counter_base;

struct arch_timer {
	void __iomem *base;
	struct clock_event_device evt;
};

#define to_arch_timer(e) container_of(e, struct arch_timer, evt)

static u32 arch_timer_rate;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI];

static struct clock_event_device __percpu *arch_timer_evt;

static enum arch_timer_ppi_nr arch_timer_uses_ppi = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
static bool arch_counter_suspend_stop;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
#else
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_NONE;
#endif

static cpumask_t evtstrm_available = CPU_MASK_NONE;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);

static int __init early_evtstrm_cfg(char *buf)
{
	return strtobool(buf, &evtstrm_enable);
}
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);

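/*
 * Architected system timer support.
 */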
static __always_inline
void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
			  struct clock_event_device *clk)
{
	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			writel_relaxed(val, timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			writel_relaxed(val, timer->base + CNTV_TVAL);
			break;
		}
	} else {
		arch_timer_reg_write_cp15(access, reg, val);
	}
}

static __always_inline
u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
			struct clock_event_device *clk)
{
	u32 val;

	if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTP_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTP_TVAL);
			break;
		}
	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
		struct arch_timer *timer = to_arch_timer(clk);
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			val = readl_relaxed(timer->base + CNTV_CTL);
			break;
		case ARCH_TIMER_REG_TVAL:
			val = readl_relaxed(timer->base + CNTV_TVAL);
			break;
		}
	} else {
		val = arch_timer_reg_read_cp15(access, reg);
	}

	return val;
}

static notrace u64 arch_counter_get_cntpct_stable(void)
{
	return __arch_counter_get_cntpct_stable();
}

static notrace u64 arch_counter_get_cntpct(void)
{
	return __arch_counter_get_cntpct();
}

static notrace u64 arch_counter_get_cntvct_stable(void)
{
	return __arch_counter_get_cntvct_stable();
}

static notrace u64 arch_counter_get_cntvct(void)
{
	return __arch_counter_get_cntvct();
}

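/*
 * Default to cp15 based access because arm64 uses this function for
 * sched_clock() before DT is probed and the cp15 method is guaranteed
 * to exist on arm64. arm doesn't use this before DT is probed so even
 * if we don't have the cp15 accessors we won't have a problem.
 */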
u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
EXPORT_SYMBOL_GPL(arch_timer_read_counter);

static u64 arch_counter_read(struct clocksource *cs)
{
	return arch_timer_read_counter();
}

static u64 arch_counter_read_cc(const struct cyclecounter *cc)
{
	return arch_timer_read_counter();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter __ro_after_init = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

struct ate_acpi_oem_info {
	char oem_id[ACPI_OEM_ID_SIZE + 1];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
	u32 oem_revision;
};

#ifdef CONFIG_FSL_ERRATUM_A008585
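/*
 * The number of retries is an arbitrary value well beyond the highest number
 * of iterations the loop has been observed to take.
 */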
#define __fsl_a008585_read_reg(reg) ({			\
	u64 _old, _new;					\
	int _retries = 200;				\
							\
	do {						\
		_old = read_sysreg(reg);		\
		_new = read_sysreg(reg);		\
		_retries--;				\
	} while (unlikely(_old != _new) && _retries);	\
							\
	WARN_ON_ONCE(!_retries);			\
	_new;						\
})

static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntp_tval_el0);
}

static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
{
	return __fsl_a008585_read_reg(cntv_tval_el0);
}

static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
	return __fsl_a008585_read_reg(cntpct_el0);
}

static u64 notrace fsl_a008585_read_cntvct_el0(void)
{
	return __fsl_a008585_read_reg(cntvct_el0);
}
#endif

#ifdef CONFIG_HISILICON_ERRATUM_161010101
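/*
 * HiSilicon erratum 161010101: reads of the counter/timer registers may
 * return a stale, out-of-order value. Read the register twice and only
 * accept the result once the two reads are less than 2^5 ticks apart
 * (i.e. (_new - _old) >> 5 is zero), retrying a bounded number of times.
 */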
#define __hisi_161010101_read_reg(reg) ({				\
	u64 _old, _new;							\
	int _retries = 50;						\
									\
	do {								\
		_old = read_sysreg(reg);				\
		_new = read_sysreg(reg);				\
		_retries--;						\
	} while (unlikely((_new - _old) >> 5) && _retries);		\
									\
	WARN_ON_ONCE(!_retries);					\
	_new;								\
})

static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntp_tval_el0);
}

static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
{
	return __hisi_161010101_read_reg(cntv_tval_el0);
}

static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
	return __hisi_161010101_read_reg(cntpct_el0);
}

static u64 notrace hisi_161010101_read_cntvct_el0(void)
{
	return __hisi_161010101_read_reg(cntvct_el0);
}

static struct ate_acpi_oem_info hisi_161010101_oem_info[] = {
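	/*
	 * Note that trailing spaces are required to properly match
	 * the OEM table information.
	 */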
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP05   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP06   ",
		.oem_revision	= 0,
	},
	{
		.oem_id		= "HISI  ",
		.oem_table_id	= "HIP07   ",
		.oem_revision	= 0,
	},
	{ },
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_858921
static u64 notrace arm64_858921_read_cntpct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntpct_el0);
	new = read_sysreg(cntpct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}

static u64 notrace arm64_858921_read_cntvct_el0(void)
{
	u64 old, new;

	old = read_sysreg(cntvct_el0);
	new = read_sysreg(cntvct_el0);
	return (((old ^ new) >> 32) & 1) ? old : new;
}
#endif

#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);

static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);

static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
						struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		cval = evt + arch_counter_get_cntpct();
		write_sysreg(cval, cntp_cval_el0);
	} else {
		cval = evt + arch_counter_get_cntvct();
		write_sysreg(cval, cntv_cval_el0);
	}

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
							   struct clock_event_device *clk)
{
	erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static const struct arch_timer_erratum_workaround ool_workarounds[] = {
#ifdef CONFIG_FSL_ERRATUM_A008585
	{
		.match_type = ate_match_dt,
		.id = "fsl,erratum-a008585",
392 .desc = "Freescale erratum a005858",
		.read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
		.read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
		.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
		.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
	{
		.match_type = ate_match_dt,
		.id = "hisilicon,erratum-161010101",
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
	{
		.match_type = ate_match_acpi_oem_info,
		.id = hisi_161010101_oem_info,
		.desc = "HiSilicon erratum 161010101",
		.read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
		.read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
		.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
		.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
		.set_next_event_phys = erratum_set_next_event_tval_phys,
		.set_next_event_virt = erratum_set_next_event_tval_virt,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		.match_type = ate_match_local_cap_id,
		.id = (void *)ARM64_WORKAROUND_858921,
		.desc = "ARM erratum 858921",
		.read_cntpct_el0 = arm64_858921_read_cntpct_el0,
		.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
	},
#endif
};

typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
			       const void *);

static
bool arch_timer_check_dt_erratum(const struct arch_timer_erratum_workaround *wa,
				 const void *arg)
{
	const struct device_node *np = arg;

	return of_property_read_bool(np, wa->id);
}

static
bool arch_timer_check_local_cap_erratum(const struct arch_timer_erratum_workaround *wa,
					const void *arg)
{
	return this_cpu_has_cap((uintptr_t)wa->id);
}

static
bool arch_timer_check_acpi_oem_erratum(const struct arch_timer_erratum_workaround *wa,
				       const void *arg)
{
	static const struct ate_acpi_oem_info empty_oem_info = {};
	const struct ate_acpi_oem_info *info = wa->id;
	const struct acpi_table_header *table = arg;

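	/* Iterate over the ACPI OEM info array, looking for a match */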
	while (memcmp(info, &empty_oem_info, sizeof(*info))) {
		if (!memcmp(info->oem_id, table->oem_id, ACPI_OEM_ID_SIZE) &&
		    !memcmp(info->oem_table_id, table->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
		    info->oem_revision == table->oem_revision)
			return true;

		info++;
	}

	return false;
}

static const struct arch_timer_erratum_workaround *
arch_timer_iterate_errata(enum arch_timer_erratum_match_type type,
			  ate_match_fn_t match_fn,
			  void *arg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ool_workarounds); i++) {
		if (ool_workarounds[i].match_type != type)
			continue;

		if (match_fn(&ool_workarounds[i], arg))
			return &ool_workarounds[i];
	}

	return NULL;
}

static
void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa,
				  bool local)
{
	int i;

	if (local) {
		__this_cpu_write(timer_unstable_counter_workaround, wa);
	} else {
		for_each_possible_cpu(i)
			per_cpu(timer_unstable_counter_workaround, i) = wa;
	}

	if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
		atomic_set(&timer_unstable_counter_workaround_in_use, 1);

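	/*
	 * Don't use the vdso fastpath if errata require using the
	 * out-of-line counter accessor. We may change our mind pretty
	 * late in the game (with a per-CPU erratum, for example), so
	 * change both the default value and the vdso itself.
	 */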
	if (wa->read_cntvct_el0) {
		clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
		vdso_default = VDSO_CLOCKMODE_NONE;
	}
}

static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
					    void *arg)
{
	const struct arch_timer_erratum_workaround *wa, *__wa;
	ate_match_fn_t match_fn = NULL;
	bool local = false;

	switch (type) {
	case ate_match_dt:
		match_fn = arch_timer_check_dt_erratum;
		break;
	case ate_match_local_cap_id:
		match_fn = arch_timer_check_local_cap_erratum;
		local = true;
		break;
	case ate_match_acpi_oem_info:
		match_fn = arch_timer_check_acpi_oem_erratum;
		break;
	default:
		WARN_ON(1);
		return;
	}

	wa = arch_timer_iterate_errata(type, match_fn, arg);
	if (!wa)
		return;

	__wa = __this_cpu_read(timer_unstable_counter_workaround);
	if (__wa && wa != __wa)
		pr_warn("Can't enable workaround for %s (clashes with %s)\n",
			wa->desc, __wa->desc);

	if (__wa)
		return;

	arch_timer_enable_workaround(wa, local);
	pr_info("Enabling %s workaround for %s\n",
		local ? "local" : "global", wa->desc);
}

static bool arch_timer_this_cpu_has_cntvct_wa(void)
{
	return has_erratum_handler(read_cntvct_el0);
}

static bool arch_timer_counter_has_wa(void)
{
	return atomic_read(&timer_unstable_counter_workaround_in_use);
}
#else
#define arch_timer_check_ool_workaround(t,a)	do { } while(0)
#define arch_timer_this_cpu_has_cntvct_wa()	({false;})
#define arch_timer_counter_has_wa()		({false;})
#endif

static __always_inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}

static __always_inline int timer_shutdown(const int access,
					  struct clock_event_device *clk)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);

	return 0;
}

static int arch_timer_shutdown_virt(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}

static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
}

static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
{
	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
}

static __always_inline void set_next_event(const int access, unsigned long evt,
					   struct clock_event_device *clk)
{
	unsigned long ctrl;
	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_virt_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
	return 0;
}

static int arch_timer_set_next_event_phys_mem(unsigned long evt,
					      struct clock_event_device *clk)
{
	set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
	return 0;
}

static void __arch_timer_setup(unsigned type,
			       struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT;

	if (type == ARCH_TIMER_TYPE_CP15) {
		typeof(clk->set_next_event) sne;

		arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);

		if (arch_timer_c3stop)
			clk->features |= CLOCK_EVT_FEAT_C3STOP;
		clk->name = "arch_sys_timer";
		clk->rating = 450;
		clk->cpumask = cpumask_of(smp_processor_id());
		clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
		switch (arch_timer_uses_ppi) {
		case ARCH_TIMER_VIRT_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_virt;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
			sne = erratum_handler(set_next_event_virt);
			break;
		case ARCH_TIMER_PHYS_SECURE_PPI:
		case ARCH_TIMER_PHYS_NONSECURE_PPI:
		case ARCH_TIMER_HYP_PPI:
			clk->set_state_shutdown = arch_timer_shutdown_phys;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
			sne = erratum_handler(set_next_event_phys);
			break;
		default:
			BUG();
		}

		clk->set_next_event = sne;
	} else {
		clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
		clk->name = "arch_mem_timer";
		clk->rating = 400;
		clk->cpumask = cpu_possible_mask;
		if (arch_timer_mem_use_virtual) {
			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
			clk->set_next_event =
				arch_timer_set_next_event_virt_mem;
		} else {
			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
			clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
			clk->set_next_event =
				arch_timer_set_next_event_phys_mem;
		}
	}

	clk->set_state_shutdown(clk);

	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
}

static void arch_timer_evtstrm_enable(int divider)
{
	u32 cntkctl = arch_timer_get_cntkctl();

	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
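	/* Set the divider and enable virtual event stream */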
	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
			| ARCH_TIMER_VIRT_EVT_EN;
	arch_timer_set_cntkctl(cntkctl);
	elf_hwcap |= HWCAP_EVTSTRM;
#ifdef CONFIG_COMPAT
	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
#endif
	cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
}

static void arch_timer_configure_evtstream(void)
{
	int evt_stream_div, pos;

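	/* Find the closest power of two to the divisor */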
	evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
	pos = fls(evt_stream_div);
	if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
		pos--;

	arch_timer_evtstrm_enable(min(pos, 15));
}

static void arch_counter_set_user_access(void)
{
	u32 cntkctl = arch_timer_get_cntkctl();

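	/* Disable user access to the timers and both counters */
	/* Also disable virtual event stream */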
	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
			| ARCH_TIMER_USR_VT_ACCESS_EN
			| ARCH_TIMER_USR_VCT_ACCESS_EN
			| ARCH_TIMER_VIRT_EVT_EN
			| ARCH_TIMER_USR_PCT_ACCESS_EN);

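	/*
	 * Enable user access to the virtual counter if it doesn't
	 * need to be worked around. The vdso may have been already
	 * disabled though.
	 */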
	if (arch_timer_this_cpu_has_cntvct_wa())
		pr_info("CPU%d: Trapping CNTVCT access\n", smp_processor_id());
	else
		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;

	arch_timer_set_cntkctl(cntkctl);
}

static bool arch_timer_has_nonsecure_ppi(void)
{
	return (arch_timer_uses_ppi == ARCH_TIMER_PHYS_SECURE_PPI &&
		arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
}

static u32 check_ppi_trigger(int irq)
{
	u32 flags = irq_get_trigger_type(irq);

	if (flags != IRQF_TRIGGER_HIGH && flags != IRQF_TRIGGER_LOW) {
		pr_warn("WARNING: Invalid trigger for IRQ%d, assuming level low\n", irq);
		pr_warn("WARNING: Please fix your firmware\n");
		flags = IRQF_TRIGGER_LOW;
	}

	return flags;
}

static int arch_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
	u32 flags;

	__arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);

	flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);

	if (arch_timer_has_nonsecure_ppi()) {
		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				  flags);
	}

	arch_counter_set_user_access();
	if (evtstrm_enable)
		arch_timer_configure_evtstream();

	return 0;
}

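/*
 * For historical reasons, when probing with DT we use whichever (non-zero)
 * rate was probed first, and don't verify that others match. If the first node
 * probed has a clock-frequency property, this overrides the HW register.
 */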
static void arch_timer_of_configure_rate(u32 rate, struct device_node *np)
{
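	/* Who has more than one independent system counter? */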
	if (arch_timer_rate)
		return;

	if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate))
		arch_timer_rate = rate;

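	/* Check the timer frequency. */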
	if (arch_timer_rate == 0)
		pr_warn("frequency not available\n");
}

static void arch_timer_banner(unsigned type)
{
	pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
		type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
			" and " : "",
		type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
		(unsigned long)arch_timer_rate / 1000000,
		(unsigned long)(arch_timer_rate / 10000) % 100,
		type & ARCH_TIMER_TYPE_CP15 ?
			(arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
			"",
		type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
		type & ARCH_TIMER_TYPE_MEM ?
			arch_timer_mem_use_virtual ? "virt" : "phys" :
			"");
}

u32 arch_timer_get_rate(void)
{
	return arch_timer_rate;
}

bool arch_timer_evtstrm_available(void)
{
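	/*
	 * We might get called from a preemptible context. This is fine
	 * because availability of the event stream should be always the same
	 * for a preemptible context and context where we might resume a task.
	 */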
	return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}

static u64 arch_counter_get_cntvct_mem(void)
{
	u32 vct_lo, vct_hi, tmp_hi;

	do {
		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
	} while (vct_hi != tmp_hi);

	return ((u64) vct_hi << 32) | vct_lo;
}

static struct arch_timer_kvm_info arch_timer_kvm_info;

struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
	return &arch_timer_kvm_info;
}

static void __init arch_counter_register(unsigned type)
{
	u64 start_count;

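	/* Register the CP15 based counter if we have one */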
	if (type & ARCH_TIMER_TYPE_CP15) {
		u64 (*rd)(void);

		if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
		    arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntvct_stable;
			else
				rd = arch_counter_get_cntvct;
		} else {
			if (arch_timer_counter_has_wa())
				rd = arch_counter_get_cntpct_stable;
			else
				rd = arch_counter_get_cntpct;
		}

		arch_timer_read_counter = rd;
		clocksource_counter.vdso_clock_mode = vdso_default;
	} else {
		arch_timer_read_counter = arch_counter_get_cntvct_mem;
	}

	if (!arch_counter_suspend_stop)
		clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
	start_count = arch_timer_read_counter();
	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&arch_timer_kvm_info.timecounter,
			 &cyclecounter, start_count);

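	/* 56 bits minimum, so we assume worst case rollover */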
	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
}

static void arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("disable IRQ%d cpu #%d\n", clk->irq, smp_processor_id());

	disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
	if (arch_timer_has_nonsecure_ppi())
		disable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);

	clk->set_state_shutdown(clk);
}

static int arch_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);

	cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);

	arch_timer_stop(clk);
	return 0;
}

#ifdef CONFIG_CPU_PM
static DEFINE_PER_CPU(unsigned long, saved_cntkctl);
static int arch_timer_cpu_pm_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	if (action == CPU_PM_ENTER) {
		__this_cpu_write(saved_cntkctl, arch_timer_get_cntkctl());

		cpumask_clear_cpu(smp_processor_id(), &evtstrm_available);
	} else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
		arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));

		if (elf_hwcap & HWCAP_EVTSTRM)
			cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
	}
	return NOTIFY_OK;
}

static struct notifier_block arch_timer_cpu_pm_notifier = {
	.notifier_call = arch_timer_cpu_pm_notify,
};

static int __init arch_timer_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
}

static void __init arch_timer_cpu_pm_deinit(void)
{
	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
}

#else
static int __init arch_timer_cpu_pm_init(void)
{
	return 0;
}

static void __init arch_timer_cpu_pm_deinit(void)
{
}
#endif

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	arch_timer_evt = alloc_percpu(struct clock_event_device);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	ppi = arch_timer_ppi[arch_timer_uses_ppi];
	switch (arch_timer_uses_ppi) {
	case ARCH_TIMER_VIRT_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
		break;
	case ARCH_TIMER_PHYS_SECURE_PPI:
	case ARCH_TIMER_PHYS_NONSECURE_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_has_nonsecure_ppi()) {
			ppi = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_SECURE_PPI],
						arch_timer_evt);
		}
		break;
	case ARCH_TIMER_HYP_PPI:
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		break;
	default:
		BUG();
	}

	if (err) {
		pr_err("can't register interrupt %d (%d)\n", ppi, err);
		goto out_free;
	}

	err = arch_timer_cpu_pm_init();
	if (err)
		goto out_unreg_notify;

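	/* Register and immediately configure the timer on the boot CPU */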
	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
				"clockevents/arm/arch_timer:starting",
				arch_timer_starting_cpu, arch_timer_dying_cpu);
	if (err)
		goto out_unreg_cpupm;
	return 0;

out_unreg_cpupm:
	arch_timer_cpu_pm_deinit();

out_unreg_notify:
	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
	if (arch_timer_has_nonsecure_ppi())
		free_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
				arch_timer_evt);

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
	int ret;
	irq_handler_t func;
	struct arch_timer *t;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	t->base = base;
	t->evt.irq = irq;
	__arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);

	if (arch_timer_mem_use_virtual)
		func = arch_timer_handler_virt_mem;
	else
		func = arch_timer_handler_phys_mem;

	ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
	if (ret) {
		pr_err("Failed to request mem timer irq\n");
		kfree(t);
	}

	return ret;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer", },
	{ .compatible = "arm,armv8-timer", },
	{},
};

static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
	{ .compatible = "arm,armv7-timer-mem", },
	{},
};

static bool __init arch_timer_needs_of_probing(void)
{
	struct device_node *dn;
	bool needs_probing = false;
	unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;

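	/* We have two timers, and both device-tree nodes are probed. */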
	if ((arch_timers_present & mask) == mask)
		return false;

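	/*
	 * Only one type of timer is probed,
	 * check if we have another type of timer node in device-tree.
	 */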
	if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
		dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
	else
		dn = of_find_matching_node(NULL, arch_timer_of_match);

	if (dn && of_device_is_available(dn))
		needs_probing = true;

	of_node_put(dn);

	return needs_probing;
}

static int __init arch_timer_common_init(void)
{
	arch_timer_banner(arch_timers_present);
	arch_counter_register(arch_timers_present);
	return arch_timer_arch_init();
}

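/**
 * arch_timer_select_ppi() - Select suitable PPI for the current system.
 *
 * If HYP mode is available, we know that the physical timer
 * has been configured to be accessible from PL1. Use it, so
 * that a guest can use the virtual timer instead.
 *
 * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
 * accesses to CNTP_*_EL1 registers are silently redirected to
 * their CNTHP_*_EL2 counterparts, and use a different PPI
 * number.
 *
 * If no interrupt provided for virtual timer, we'll have to
 * stick to the physical timer. It'd better be accessible...
 * For arm64 we never use the secure interrupt.
 *
 * Return: a suitable PPI type for the current system.
 */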
static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
{
	if (is_kernel_in_hyp_mode())
		return ARCH_TIMER_HYP_PPI;

	if (!is_hyp_mode_available() && arch_timer_ppi[ARCH_TIMER_VIRT_PPI])
		return ARCH_TIMER_VIRT_PPI;

	if (IS_ENABLED(CONFIG_ARM64))
		return ARCH_TIMER_PHYS_NONSECURE_PPI;

	return ARCH_TIMER_PHYS_SECURE_PPI;
}

static void __init arch_timer_populate_kvm_info(void)
{
	arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
	if (is_kernel_in_hyp_mode())
		arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
}

static int __init arch_timer_of_init(struct device_node *np)
{
	int i, ret;
	u32 rate;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("multiple nodes in dt, skipping\n");
		return 0;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;
	for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	arch_timer_populate_kvm_info();

	rate = arch_timer_get_cntfrq();
	arch_timer_of_configure_rate(rate, np);

	arch_timer_c3stop = !of_property_read_bool(np, "always-on");

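	/* Check for globally applicable workarounds */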
	arch_timer_check_ool_workaround(ate_match_dt, np);

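	/*
	 * If we cannot rely on firmware initializing the timer registers then
	 * we should use the physical timers instead.
	 */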
	if (IS_ENABLED(CONFIG_ARM) &&
	    of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
		arch_timer_uses_ppi = ARCH_TIMER_PHYS_SECURE_PPI;
	else
		arch_timer_uses_ppi = arch_timer_select_ppi();

	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

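	/* On some systems, the counter stops ticking when in suspend. */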
	arch_counter_suspend_stop = of_property_read_bool(np,
							 "arm,no-tick-in-suspend");

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (arch_timer_needs_of_probing())
		return 0;

	return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);

static u32 __init
arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	u32 rate;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
		return 0;
	}

	rate = readl_relaxed(base + CNTFRQ);

	iounmap(base);

	return rate;
}

static struct arch_timer_mem_frame * __init
arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	void __iomem *cntctlbase;
	u32 cnttidr;
	int i;

	cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
	if (!cntctlbase) {
		pr_err("Can't map CNTCTLBase @ %pa\n",
			&timer_mem->cntctlbase);
		return NULL;
	}

	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);

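	/*
	 * Try to find a virtual capable frame. Otherwise fall back to a
	 * physical capable frame.
	 */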
	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
			     CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;

		frame = &timer_mem->frame[i];
		if (!frame->valid)
			continue;

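		/* Try enabling everything, and see what sticks */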
		writel_relaxed(cntacr, cntctlbase + CNTACR(i));
		cntacr = readl_relaxed(cntctlbase + CNTACR(i));

		if ((cnttidr & CNTTIDR_VIRT(i)) &&
		    !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
			best_frame = frame;
			arch_timer_mem_use_virtual = true;
			break;
		}

		if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
			continue;

		best_frame = frame;
	}

	iounmap(cntctlbase);

	return best_frame;
}

static int __init
arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
{
	void __iomem *base;
	int ret, irq = 0;

	if (arch_timer_mem_use_virtual)
		irq = frame->virt_irq;
	else
		irq = frame->phys_irq;

	if (!irq) {
		pr_err("Frame missing %s irq.\n",
		       arch_timer_mem_use_virtual ? "virt" : "phys");
		return -EINVAL;
	}

	if (!request_mem_region(frame->cntbase, frame->size,
				"arch_mem_timer"))
		return -EBUSY;

	base = ioremap(frame->cntbase, frame->size);
	if (!base) {
		pr_err("Can't map frame's registers\n");
		return -ENXIO;
	}

	ret = arch_timer_mem_register(base, irq);
	if (ret) {
		iounmap(base);
		return ret;
	}

	arch_counter_base = base;
	arch_timers_present |= ARCH_TIMER_TYPE_MEM;

	return 0;
}

static int __init arch_timer_mem_of_init(struct device_node *np)
{
	struct arch_timer_mem *timer_mem;
	struct arch_timer_mem_frame *frame;
	struct device_node *frame_node;
	struct resource res;
	int ret = -EINVAL;
	u32 rate;

	timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
	if (!timer_mem)
		return -ENOMEM;

	if (of_address_to_resource(np, 0, &res))
		goto out;
	timer_mem->cntctlbase = res.start;
	timer_mem->size = resource_size(&res);

	for_each_available_child_of_node(np, frame_node) {
		u32 n;
		struct arch_timer_mem_frame *frame;

		if (of_property_read_u32(frame_node, "frame-number", &n)) {
			pr_err(FW_BUG "Missing frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}
		if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
			pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
			       ARCH_TIMER_MEM_MAX_FRAMES - 1);
			of_node_put(frame_node);
			goto out;
		}
		frame = &timer_mem->frame[n];

		if (frame->valid) {
			pr_err(FW_BUG "Duplicated frame-number.\n");
			of_node_put(frame_node);
			goto out;
		}

		if (of_address_to_resource(frame_node, 0, &res)) {
			of_node_put(frame_node);
			goto out;
		}
		frame->cntbase = res.start;
		frame->size = resource_size(&res);

		frame->virt_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_VIRT_SPI);
		frame->phys_irq = irq_of_parse_and_map(frame_node,
						       ARCH_TIMER_PHYS_SPI);

		frame->valid = true;
	}

	frame = arch_timer_mem_find_best_frame(timer_mem);
	if (!frame) {
		pr_err("Unable to find a suitable frame in timer @ %pa\n",
			&timer_mem->cntctlbase);
		ret = -EINVAL;
		goto out;
	}

	rate = arch_timer_mem_frame_get_cntfrq(frame);
	arch_timer_of_configure_rate(rate, np);

	ret = arch_timer_mem_frame_register(frame);
	if (!ret && !arch_timer_needs_of_probing())
		ret = arch_timer_common_init();
out:
	kfree(timer_mem);
	return ret;
}
TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
		 arch_timer_mem_of_init);

#ifdef CONFIG_ACPI_GTDT
static int __init
arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
{
	struct arch_timer_mem_frame *frame;
	u32 rate;
	int i;

	for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
		frame = &timer_mem->frame[i];

		if (!frame->valid)
			continue;

		rate = arch_timer_mem_frame_get_cntfrq(frame);
		if (rate == arch_timer_rate)
			continue;

		pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
			&frame->cntbase,
			(unsigned long)rate, (unsigned long)arch_timer_rate);

		return -EINVAL;
	}

	return 0;
}

static int __init arch_timer_mem_acpi_init(int platform_timer_count)
{
	struct arch_timer_mem *timers, *timer;
	struct arch_timer_mem_frame *frame, *best_frame = NULL;
	int timer_count, i, ret = 0;

	timers = kcalloc(platform_timer_count, sizeof(*timers),
			 GFP_KERNEL);
	if (!timers)
		return -ENOMEM;

	ret = acpi_arch_timer_mem_init(timers, &timer_count);
	if (ret || !timer_count)
		goto out;

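	/*
	 * While unlikely, it's theoretically possible that none of the frames
	 * in a timer expose the combination of feature we want.
	 */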
	for (i = 0; i < timer_count; i++) {
		timer = &timers[i];

		frame = arch_timer_mem_find_best_frame(timer);
		if (!best_frame)
			best_frame = frame;

		ret = arch_timer_mem_verify_cntfrq(timer);
		if (ret) {
			pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
			goto out;
		}

		if (!best_frame)
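			/*
			 * Only complain about missing suitable frames if we
			 * haven't already found one in a previous iteration.
			 */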
			pr_err("Unable to find a suitable frame in timer @ %pa\n",
			       &timer->cntctlbase);
	}

	if (best_frame)
		ret = arch_timer_mem_frame_register(best_frame);
out:
	kfree(timers);
	return ret;
}

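/* Initialize per-processor generic timer and memory-mapped timer (if present) */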
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
	int ret, platform_timer_count;

	if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
		pr_warn("already initialized, skipping\n");
		return -EINVAL;
	}

	arch_timers_present |= ARCH_TIMER_TYPE_CP15;

	ret = acpi_gtdt_init(table, &platform_timer_count);
	if (ret) {
		pr_err("Failed to init GTDT table.\n");
		return ret;
	}

	arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_PHYS_NONSECURE_PPI);

	arch_timer_ppi[ARCH_TIMER_VIRT_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_VIRT_PPI);

	arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
		acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);

	arch_timer_populate_kvm_info();

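	/*
	 * When probing via ACPI, we have no mechanism to override the sysreg
	 * CNTFRQ value. This *must* be correct.
	 */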
	arch_timer_rate = arch_timer_get_cntfrq();
	if (!arch_timer_rate) {
		pr_err(FW_BUG "frequency not available.\n");
		return -EINVAL;
	}

	arch_timer_uses_ppi = arch_timer_select_ppi();
	if (!arch_timer_ppi[arch_timer_uses_ppi]) {
		pr_err("No interrupt available, giving up\n");
		return -EINVAL;
	}

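	/* Always-on capability */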
	arch_timer_c3stop = acpi_gtdt_c3stop(arch_timer_uses_ppi);

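	/* Check for globally applicable workarounds */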
	arch_timer_check_ool_workaround(ate_match_acpi_oem_info, table);

	ret = arch_timer_register();
	if (ret)
		return ret;

	if (platform_timer_count &&
	    arch_timer_mem_acpi_init(platform_timer_count))
		pr_err("Failed to initialize memory-mapped timer.\n");

	return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
#endif