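/*
 * Driver for the MSI controller (MSIC) on the IBM Axon bridge used with
 * Cell processors.  The MSIC catches MSI writes from PCI devices and
 * deposits them as 16-byte entries in a DMA FIFO in system memory; a
 * cascaded interrupt then drains the FIFO and dispatches each entry to
 * the Linux irq number it encodes.
 */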
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into the
 * FIFO size field of the MSIC_CTRL_REG register.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets so they stay within the bounds of
 * the FIFO and remain aligned to the 16-byte entry size.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10

struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;	/* kernel mapping of the MSI FIFO */
	dma_addr_t fifo_phys;	/* bus address programmed into the MSIC */
	dcr_host_t dcr_host;
	u32 read_offset;	/* our cursor into the FIFO */
#ifdef DEBUG
	u32 __iomem *trigger;	/* MMIO register used to inject an MSI */
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

	dcr_write(msic->dcr_host, dcr_n, val);
}
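
/*
 * Chained handler for the MSIC's cascade interrupt: drain new entries from
 * the MSI FIFO (between our read_offset and the hardware write offset) and
 * hand each one to generic_handle_irq().  The low 16 bits of a FIFO entry
 * hold the virq number that was programmed into the device's MSI message.
 */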
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_get_handler_data(irq);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		/* Skip the entry that never became valid */
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}
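
/*
 * Walk up the device tree from the PCI device looking for an
 * "msi-translator" property, then follow that phandle to the MSIC node
 * and return the axon_msic hanging off its irq_domain.
 */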
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
			dn->full_name);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}
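
/*
 * Fill in the MSI message address from the "msi-address-64" or
 * "msi-address-32" property found on the device node or one of its
 * parents: this is the address the device should write its MSI message
 * to so that the MSIC can catch it.
 */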
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	struct msi_desc *entry;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		return -ENOENT;
	}

	/* len is in bytes: 8 for a 64-bit address, 4 for a 32-bit one */
	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}
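
/*
 * setup_msi_irqs hook (installed in axon_msi_probe()): each MSI gets a
 * freshly created virq in the MSIC's nomap irq_domain, and that virq
 * number is written into msg.data, so the value the device deposits in
 * the FIFO directly identifies the Linux interrupt to fire (see
 * axon_msi_cascade()).
 */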
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;
	unsigned int virq;
	int rc;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	list_for_each_entry(entry, &dev->msi_list, list) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (virq == NO_IRQ) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		write_msi_msg(virq, &msg);
	}

	return 0;
}

static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
	.name		= "AXON-MSI",
};

static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %s\n",
		 msic->irq_domain->of_node->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}
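
/*
 * Probe: map the MSIC's DCRs, allocate the DMA FIFO, hook up the cascade
 * interrupt, create a nomap irq_domain for the MSIs, program the FIFO
 * base address into the MSIC and enable it, then install the MSI
 * setup/teardown hooks into cell_pci_controller_ops.
 */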
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
		       dn->full_name);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
		       dn->full_name);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a 16-bit field */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
		       dn->full_name);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:
	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name			= "axon-msi",
		.owner			= THIS_MODULE,
		.of_match_table		= axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);
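
/*
 * Debug support: expose a debugfs file which, when written to, pokes the
 * MSIC's MMIO trigger register so an MSI can be injected by hand.
 */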
#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	u64 addr;

	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");
		return;
	}

	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
				 msic, &fops_msic)) {
		pr_devel("axon_msi: debugfs_create_file failed!\n");
		return;
	}
}
#endif