1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include <linux/of_address.h>
20#include <linux/irq.h>
21#include <linux/irqchip.h>
22#include <linux/irqdomain.h>
23
24#include <asm/v7m.h>
25#include <asm/exception.h>
26
27#define NVIC_ISER 0x000
28#define NVIC_ICER 0x080
29#define NVIC_IPR 0x400
30
31#define NVIC_MAX_BANKS 16
32
/*
 * Each bank handles 32 irqs, except for the last (16th) bank, which
 * only handles 16 — hence (NVIC_MAX_BANKS - 1) * 32 + 16 below.
 */
36#define NVIC_MAX_IRQ ((NVIC_MAX_BANKS - 1) * 32 + 16)
37
38static struct irq_domain *nvic_irq_domain;
39
/* Dispatch one NVIC hardware interrupt through the linear irq domain. */
static void __nvic_handle_irq(irq_hw_number_t hwirq)
{
	generic_handle_domain_irq(nvic_irq_domain, hwirq);
}
44
45
46
47
48
/*
 * Top-level IRQ entry point, invoked from the ARMv7-M exception entry
 * code with the hardware irq number already extracted (NOTE(review):
 * presumably the active exception number minus the 16 architectural
 * system exceptions — confirm against the arch entry assembly).
 * Performs the standard irq_enter()/irq_exit() accounting and swaps
 * the saved register set around the dispatch.
 */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	old_regs = set_irq_regs(regs);
	__nvic_handle_irq(hwirq);
	set_irq_regs(old_regs);
	irq_exit();
}
60
61static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
62 unsigned int nr_irqs, void *arg)
63{
64 int i, ret;
65 irq_hw_number_t hwirq;
66 unsigned int type = IRQ_TYPE_NONE;
67 struct irq_fwspec *fwspec = arg;
68
69 ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
70 if (ret)
71 return ret;
72
73 for (i = 0; i < nr_irqs; i++)
74 irq_map_generic_chip(domain, virq + i, hwirq + i);
75
76 return 0;
77}
78
/* Domain ops: onecell translation; freeing is delegated to the core. */
static const struct irq_domain_ops nvic_irq_domain_ops = {
	.translate = irq_domain_translate_onecell,
	.alloc = nvic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};
84
85static int __init nvic_of_init(struct device_node *node,
86 struct device_node *parent)
87{
88 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
89 unsigned int irqs, i, ret, numbanks;
90 void __iomem *nvic_base;
91
92 numbanks = (readl_relaxed(V7M_SCS_ICTR) &
93 V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
94
95 nvic_base = of_iomap(node, 0);
96 if (!nvic_base) {
97 pr_warn("unable to map nvic registers\n");
98 return -ENOMEM;
99 }
100
101 irqs = numbanks * 32;
102 if (irqs > NVIC_MAX_IRQ)
103 irqs = NVIC_MAX_IRQ;
104
105 nvic_irq_domain =
106 irq_domain_add_linear(node, irqs, &nvic_irq_domain_ops, NULL);
107
108 if (!nvic_irq_domain) {
109 pr_warn("Failed to allocate irq domain\n");
110 return -ENOMEM;
111 }
112
113 ret = irq_alloc_domain_generic_chips(nvic_irq_domain, 32, 1,
114 "nvic_irq", handle_fasteoi_irq,
115 clr, 0, IRQ_GC_INIT_MASK_CACHE);
116 if (ret) {
117 pr_warn("Failed to allocate irq chips\n");
118 irq_domain_remove(nvic_irq_domain);
119 return ret;
120 }
121
122 for (i = 0; i < numbanks; ++i) {
123 struct irq_chip_generic *gc;
124
125 gc = irq_get_domain_generic_chip(nvic_irq_domain, 32 * i);
126 gc->reg_base = nvic_base + 4 * i;
127 gc->chip_types[0].regs.enable = NVIC_ISER;
128 gc->chip_types[0].regs.disable = NVIC_ICER;
129 gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
130 gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
131
132
133
134 gc->chip_types[0].chip.irq_eoi = irq_gc_noop;
135
136
137 writel_relaxed(~0, gc->reg_base + NVIC_ICER);
138 }
139
140
141 for (i = 0; i < irqs; i += 4)
142 writel_relaxed(0, nvic_base + NVIC_IPR + i);
143
144 return 0;
145}
146IRQCHIP_DECLARE(armv7m_nvic, "arm,armv7m-nvic", nvic_of_init);
147