/*
 * arch/powerpc/sysdev/uic.c
 *
 * IBM PowerPC 4xx Universal Interrupt Controller
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/dcr.h>

#define NR_UIC_INTS	32

#define UIC_SR		0x0	/* UIC status */
#define UIC_ER		0x2	/* UIC enable */
#define UIC_CR		0x3	/* UIC critical */
#define UIC_PR		0x4	/* UIC polarity */
#define UIC_TR		0x5	/* UIC triggering */
#define UIC_MSR		0x6	/* UIC masked status */
#define UIC_VR		0x7	/* UIC vector */
#define UIC_VCR		0x8	/* UIC vector configuration */

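/*
 * Note: the offsets above are DCR numbers relative to the "dcr-reg" base of
 * each UIC instance and are accessed with mtdcr()/mfdcr().  The UIC numbers
 * its interrupt sources in IBM (big-endian) bit order, with source 0 in the
 * most significant bit, hence the "1 << (31 - src)" masks used below.
 */
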
struct uic {
	int index;
	int dcrbase;

	raw_spinlock_t lock;

	/* The remapper for this UIC */
	struct irq_domain *irqhost;
};

static struct uic *primary_uic;

static void uic_unmask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31 - src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	/* ack level irqs here: see the comment in uic_mask_ack_irq() */
	if (irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er |= sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

static void uic_mask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er;

	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~(1 << (31 - src));
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

static void uic_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&uic->lock, flags);
	mtdcr(uic->dcrbase + UIC_SR, 1 << (31 - src));
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

static void uic_mask_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31 - src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	/*
	 * On the UIC, acking (i.e. clearing the SR bit) a level
	 * interrupt has no effect while the line is still asserted
	 * by the device, even if the interrupt is already masked.
	 * Therefore we only ack edge interrupts here; level
	 * interrupts are acked after the handler has run, in
	 * uic_unmask_irq().
	 */
	if (!irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

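/*
 * Program the UIC triggering (TR) and polarity (PR) registers for one
 * source: a set TR bit means edge triggered, clear means level; a set PR
 * bit means rising edge / high level, clear means falling edge / low level.
 */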
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	int trigger, polarity;
	u32 tr, pr, mask;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_NONE:
		uic_mask_irq(d);
		return 0;

	case IRQ_TYPE_EDGE_RISING:
		trigger = 1; polarity = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		trigger = 1; polarity = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		trigger = 0; polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		trigger = 0; polarity = 0;
		break;
	default:
		return -EINVAL;
	}

	mask = ~(1 << (31 - src));

	raw_spin_lock_irqsave(&uic->lock, flags);
	tr = mfdcr(uic->dcrbase + UIC_TR);
	pr = mfdcr(uic->dcrbase + UIC_PR);
	tr = (tr & mask) | (trigger << (31 - src));
	pr = (pr & mask) | (polarity << (31 - src));

	mtdcr(uic->dcrbase + UIC_PR, pr);
	mtdcr(uic->dcrbase + UIC_TR, tr);
	/* ack anything latched for this source while its sense changed */
	mtdcr(uic->dcrbase + UIC_SR, ~mask);

	raw_spin_unlock_irqrestore(&uic->lock, flags);

	return 0;
}

static struct irq_chip uic_irq_chip = {
	.name = "UIC",
	.irq_unmask = uic_unmask_irq,
	.irq_mask = uic_mask_irq,
	.irq_mask_ack = uic_mask_ack_irq,
	.irq_ack = uic_ack_irq,
	.irq_set_type = uic_set_irq_type,
};

static int uic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *uic = h->host_data;

	irq_set_chip_data(virq, uic);
	/* Despite the name, handle_level_irq() works for both level
	 * and edge interrupts on the UIC. */
	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops uic_host_ops = {
	.map = uic_host_map,
	.xlate = irq_domain_xlate_twocell,
};

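/*
 * Chained handler for a cascaded (secondary) UIC: mask (and, for edge
 * cascades, ack) the cascade input on the parent, dispatch the pending
 * source with the lowest number (highest priority) reported in this UIC's
 * masked status register, then re-ack/unmask the cascade input.
 */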
static void uic_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct uic *uic = irq_desc_get_handler_data(desc);
	u32 msr;
	int src;
	int subvirq;

	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_mask(idata);
	else
		chip->irq_mask_ack(idata);
	raw_spin_unlock(&desc->lock);

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		goto uic_irq_ret;

	src = 32 - ffs(msr);

	subvirq = irq_linear_revmap(uic->irqhost, src);
	generic_handle_irq(subvirq);

uic_irq_ret:
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_ack(idata);
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
	raw_spin_unlock(&desc->lock);
}

static struct uic * __init uic_init_one(struct device_node *node)
{
	struct uic *uic;
	const u32 *indexp, *dcrreg;
	int len;

	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));

	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
	if (! uic)
		return NULL;

	raw_spin_lock_init(&uic->lock);
	indexp = of_get_property(node, "cell-index", &len);
	if (!indexp || (len != sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "cell-index property\n", node);
		kfree(uic);
		return NULL;
	}
	uic->index = *indexp;

	dcrreg = of_get_property(node, "dcr-reg", &len);
	if (!dcrreg || (len != 2 * sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "dcr-reg property\n", node);
		kfree(uic);
		return NULL;
	}
	uic->dcrbase = *dcrreg;

	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
					     uic);
	if (! uic->irqhost) {
		kfree(uic);
		return NULL;
	}

	/* Start with all interrupts disabled, non-critical, level triggered */
	mtdcr(uic->dcrbase + UIC_ER, 0);
	mtdcr(uic->dcrbase + UIC_CR, 0);
	mtdcr(uic->dcrbase + UIC_TR, 0);
	/* Clear any interrupts left pending by the firmware */
	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);

	printk("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
	       NR_UIC_INTS, uic->dcrbase);

	return uic;
}

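/*
 * uic_init_tree() expects UIC nodes in the device tree that look roughly
 * like the sketch below (illustrative only; the exact compatible string,
 * DCR numbers and cascade wiring are board/SoC specific):
 *
 *	UIC0: interrupt-controller0 {
 *		compatible = "ibm,uic";
 *		interrupt-controller;
 *		cell-index = <0>;
 *		dcr-reg = <0x0c0 0x009>;
 *		#interrupt-cells = <2>;
 *	};
 *
 * Secondary UICs additionally carry an "interrupts" property describing
 * their cascade input on the parent UIC; that property is how the code
 * below tells the primary controller apart from the cascaded ones.
 */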
void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (!interrupts)
			break;
	}

	BUG_ON(!np);

	primary_uic = uic_init_one(np);
	if (!primary_uic)
		panic("Unable to initialize primary UIC %pOF\n", np);

	irq_set_default_host(primary_uic->irqhost);
	of_node_put(np);

	/* Then scan again for cascaded UICs and bring them up */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC */
			int cascade_virq;

			uic = uic_init_one(np);
			if (! uic)
				panic("Unable to initialize a secondary UIC %pOF\n",
				      np);

			cascade_virq = irq_of_parse_and_map(np, 0);

			irq_set_handler_data(cascade_virq, uic);
			irq_set_chained_handler(cascade_virq, uic_irq_cascade);
		}
	}
}
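
/*
 * Board code is expected to call uic_init_tree() from its init_IRQ hook and
 * to use uic_get_irq() as the machine's get_irq callback, typically along
 * these lines (sketch only; the machine name is illustrative):
 *
 *	define_machine(my44x_board) {
 *		...
 *		.init_IRQ	= uic_init_tree,
 *		.get_irq	= uic_get_irq,
 *		...
 *	};
 */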

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(! primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
	src = 32 - ffs(msr);

	/* if msr is 0, src is 32, which has no mapping and yields 0 */
	return irq_linear_revmap(primary_uic->irqhost, src);
}