/*
 * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>

#include "mv64x60.h"

/* Interrupt controller interface register offsets */
#define MV64X60_IC_MAIN_CAUSE_LO	0x0004
#define MV64X60_IC_MAIN_CAUSE_HI	0x000c
#define MV64X60_IC_CPU0_INTR_MASK_LO	0x0014
#define MV64X60_IC_CPU0_INTR_MASK_HI	0x001c
#define MV64X60_IC_CPU0_SELECT_CAUSE	0x0024

#define MV64X60_HIGH_GPP_GROUPS		0x0f000000
#define MV64X60_SELECT_CAUSE_HIGH	0x40000000

/* General purpose pin (GPP) controller interface register offsets */
#define MV64x60_GPP_INTR_CAUSE		0x0008
#define MV64x60_GPP_INTR_MASK		0x000c

/* Level-1 group indices: which 32-bit cause register a source lives in */
#define MV64x60_LEVEL1_LOW		0
#define MV64x60_LEVEL1_HIGH		1
#define MV64x60_LEVEL1_GPP		2

/* hwirq layout: bits 6:5 select the level-1 group, bits 4:0 the bit within it */
#define MV64x60_LEVEL1_MASK		0x00000060
#define MV64x60_LEVEL1_OFFSET		5

#define MV64x60_LEVEL2_MASK		0x0000001f
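
/*
 * Worked decode example (illustration only, not driver logic):
 * hwirq 69 is 0b1000101, so level1 = (69 & MV64x60_LEVEL1_MASK) >> 5 = 2
 * (the GPP group) and level2 = 69 & MV64x60_LEVEL2_MASK = 5, i.e. GPP pin 5.
 */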

#define MV64x60_NUM_IRQS		96

static DEFINE_SPINLOCK(mv64x60_lock);

static void __iomem *mv64x60_irq_reg_base;
static void __iomem *mv64x60_gpp_reg_base;

/*
 * Interrupt Controller Handling
 *
 * The bridge reports 96 interrupt sources in three groups of 32:
 *   main low:  hwirqs  0-31
 *   main high: hwirqs 32-63
 *   gpp:       hwirqs 64-95
 *
 * Interrupts are handled in two levels: level 1 selects one of the
 * three groups, level 2 selects a source within that group.  Each
 * group gets its own irq_chip.
 */

static u32 mv64x60_cached_low_mask;
/* Keep the GPP summary bits enabled so GPP interrupts cascade through */
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;

static struct irq_domain *mv64x60_irq_host;

/*
 * mv64x60_chip_low functions
 */

static void mv64x60_mask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	/* Read back to push the posted write out to the bridge */
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static void mv64x60_unmask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static struct irq_chip mv64x60_chip_low = {
	.name		= "mv64x60_low",
	.irq_mask	= mv64x60_mask_low,
	.irq_mask_ack	= mv64x60_mask_low,
	.irq_unmask	= mv64x60_unmask_low,
};

/*
 * mv64x60_chip_high functions
 */

static void mv64x60_mask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static void mv64x60_unmask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static struct irq_chip mv64x60_chip_high = {
	.name		= "mv64x60_high",
	.irq_mask	= mv64x60_mask_high,
	.irq_mask_ack	= mv64x60_mask_high,
	.irq_unmask	= mv64x60_unmask_high,
};

/*
 * mv64x60_chip_gpp functions
 */

static void mv64x60_mask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}

static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	/* GPP cause bits are cleared by writing 0, so this acks only level2 */
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
		 ~(1 << level2));
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}

static void mv64x60_unmask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask |= 1 << level2;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}

static struct irq_chip mv64x60_chip_gpp = {
	.name		= "mv64x60_gpp",
	.irq_mask	= mv64x60_mask_gpp,
	.irq_mask_ack	= mv64x60_mask_ack_gpp,
	.irq_unmask	= mv64x60_unmask_gpp,
};

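/*
 * Note the asymmetry above: the low/high chips reuse plain masking as
 * their mask_ack (they never write their cause registers), while the
 * GPP bank latches its cause bits and needs the explicit write-to-clear
 * in mv64x60_mask_ack_gpp().
 */
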
/*
 * mv64x60_host_ops functions
 */

static struct irq_chip *mv64x60_chips[] = {
	[MV64x60_LEVEL1_LOW]	= &mv64x60_chip_low,
	[MV64x60_LEVEL1_HIGH]	= &mv64x60_chip_high,
	[MV64x60_LEVEL1_GPP]	= &mv64x60_chip_gpp,
};

static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int level1;

	irq_set_status_flags(virq, IRQ_LEVEL);

	level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
	BUG_ON(level1 > MV64x60_LEVEL1_GPP);
	irq_set_chip_and_handler(virq, mv64x60_chips[level1],
				 handle_level_irq);

	return 0;
}

static const struct irq_domain_ops mv64x60_host_ops = {
	.map = mv64x60_host_map,
};

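/*
 * Mapping sketch (hypothetical caller; boards normally reach this path
 * through the device tree): a virq for GPP pin 5 could be created with
 *
 *	irq_create_mapping(mv64x60_irq_host,
 *		(MV64x60_LEVEL1_GPP << MV64x60_LEVEL1_OFFSET) | 5);
 *
 * which lands in mv64x60_host_map() and attaches mv64x60_chip_gpp.
 */
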
/*
 * Global functions
 */

void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
					&mv64x60_host_ops, NULL);

	spin_lock_irqsave(&mv64x60_lock, flags);
	/* Apply the cached masks, then clear any stale cause bits */
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);

	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}
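
/*
 * Hookup sketch (illustrative; the exact fields depend on the platform):
 * a board's define_machine() block would typically set
 *
 *	.init_IRQ = mv64x60_init_irq,
 *	.get_irq  = mv64x60_get_irq,
 *
 * so the controller is programmed at boot and polled on each external
 * interrupt.
 */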

unsigned int mv64x60_get_irq(void)
{
	u32 cause;
	int level1;
	irq_hw_number_t hwirq;
	int virq = NO_IRQ;

	cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
	if (cause & MV64X60_SELECT_CAUSE_HIGH) {
		cause &= mv64x60_cached_high_mask;
		level1 = MV64x60_LEVEL1_HIGH;
		if (cause & MV64X60_HIGH_GPP_GROUPS) {
			/* A GPP summary bit fired; read the GPP cause */
			cause = in_le32(mv64x60_gpp_reg_base +
					MV64x60_GPP_INTR_CAUSE);
			cause &= mv64x60_cached_gpp_mask;
			level1 = MV64x60_LEVEL1_GPP;
		}
	} else {
		cause &= mv64x60_cached_low_mask;
		level1 = MV64x60_LEVEL1_LOW;
	}
	if (cause) {
		hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
		virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
	}

	return virq;
}