1
2
3
4
5
6
7
8
9#include <linux/err.h>
10#include <linux/mm.h>
11#include <linux/irq.h>
12#include <linux/interrupt.h>
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/pci.h>
16#include <linux/proc_fs.h>
17#include <linux/msi.h>
18#include <linux/smp.h>
19
20#include <asm/errno.h>
21#include <asm/io.h>
22
23#include "pci.h"
24#include "msi.h"
25
26static int pci_msi_enable = 1;
27
28static void msi_set_enable(struct pci_dev *dev, int enable)
29{
30 int pos;
31 u16 control;
32
33 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
34 if (pos) {
35 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
36 control &= ~PCI_MSI_FLAGS_ENABLE;
37 if (enable)
38 control |= PCI_MSI_FLAGS_ENABLE;
39 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
40 }
41}
42
43static void msix_set_enable(struct pci_dev *dev, int enable)
44{
45 int pos;
46 u16 control;
47
48 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
49 if (pos) {
50 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
51 control &= ~PCI_MSIX_FLAGS_ENABLE;
52 if (enable)
53 control |= PCI_MSIX_FLAGS_ENABLE;
54 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
55 }
56}
57
58static void msix_flush_writes(unsigned int irq)
59{
60 struct msi_desc *entry;
61
62 entry = get_irq_msi(irq);
63 BUG_ON(!entry || !entry->dev);
64 switch (entry->msi_attrib.type) {
65 case PCI_CAP_ID_MSI:
66
67 break;
68 case PCI_CAP_ID_MSIX:
69 {
70 int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
71 PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
72 readl(entry->mask_base + offset);
73 break;
74 }
75 default:
76 BUG();
77 break;
78 }
79}
80
/*
 * Mask (flag != 0) or unmask (flag == 0) one MSI/MSI-X vector at the
 * device, and record the new state in msi_attrib.masked.
 *
 * MSI with a per-vector mask bit: update bit 0 of the mask register in
 * config space.  MSI without mask bits: emulate masking by toggling the
 * whole MSI enable bit.  MSI-X: write the entry's Vector Control word.
 */
static void msi_set_mask_bit(unsigned int irq, int flag)
{
	struct msi_desc *entry;

	entry = get_irq_msi(irq);
	BUG_ON(!entry || !entry->dev);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
		if (entry->msi_attrib.maskbit) {
			int pos;
			u32 mask_bits;

			/* For MSI, ->mask_base holds the config-space
			 * offset of the mask register (see
			 * msi_capability_init), not an ioremap address. */
			pos = (long)entry->mask_base;
			pci_read_config_dword(entry->dev, pos, &mask_bits);
			mask_bits &= ~(1);
			mask_bits |= flag;
			pci_write_config_dword(entry->dev, pos, mask_bits);
		} else {
			/* No mask bit: (un)mask via the MSI enable bit. */
			msi_set_enable(entry->dev, !flag);
		}
		break;
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		/* read back to flush the posted write */
		readl(entry->mask_base + offset);
		break;
	}
	default:
		BUG();
		break;
	}
	entry->msi_attrib.masked = !!flag;
}
116
/*
 * Read the MSI message (address/data pair) currently programmed for
 * @irq into @msg.  MSI messages are read from config space; MSI-X
 * messages are read from the memory-mapped vector table entry.
 */
void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch(entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;
		u16 data;

		pci_read_config_dword(dev, msi_lower_address_reg(pos),
					&msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: data register sits after the
			 * upper address dword. */
			pci_read_config_dword(dev, msi_upper_address_reg(pos),
						&msg->address_hi);
			pci_read_config_word(dev, msi_data_reg(pos, 1), &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, msi_data_reg(pos, 0), &data);
		}
		msg->data = data;
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
}
155
/*
 * Program the MSI message (address/data pair) for @irq from @msg and
 * cache it in the msi_desc so it can be replayed on resume.  MSI goes
 * to config space; MSI-X goes to the memory-mapped vector table entry.
 */
void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = get_irq_msi(irq);
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		struct pci_dev *dev = entry->dev;
		int pos = entry->msi_attrib.pos;

		pci_write_config_dword(dev, msi_lower_address_reg(pos),
					msg->address_lo);
		if (entry->msi_attrib.is_64) {
			/* 64-bit capability: data register sits after the
			 * upper address dword. */
			pci_write_config_dword(dev, msi_upper_address_reg(pos),
						msg->address_hi);
			pci_write_config_word(dev, msi_data_reg(pos, 1),
						msg->data);
		} else {
			pci_write_config_word(dev, msi_data_reg(pos, 0),
						msg->data);
		}
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo,
			base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(msg->address_hi,
			base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET);
		break;
	}
	default:
		BUG();
	}
	/* remember the message for __pci_restore_msi(x)_state() */
	entry->msg = *msg;
}
196
/* irq_chip mask hook: mask the vector, then flush posted MSI-X writes. */
void mask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 1);
	msix_flush_writes(irq);
}
202
/* irq_chip unmask hook: unmask the vector, then flush posted MSI-X writes. */
void unmask_msi_irq(unsigned int irq)
{
	msi_set_mask_bit(irq, 0);
	msix_flush_writes(irq);
}
208
209static int msi_free_irqs(struct pci_dev* dev);
210
211
212static struct msi_desc* alloc_msi_entry(void)
213{
214 struct msi_desc *entry;
215
216 entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL);
217 if (!entry)
218 return NULL;
219
220 INIT_LIST_HEAD(&entry->list);
221 entry->irq = 0;
222 entry->dev = NULL;
223
224 return entry;
225}
226
227static void pci_intx_for_msi(struct pci_dev *dev, int enable)
228{
229 if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
230 pci_intx(dev, enable);
231}
232
233#ifdef CONFIG_PM
/*
 * Restore a device's MSI configuration after power transition: replay
 * the cached message and mask state, then rewrite the control word.
 * No-op unless MSI was enabled on the device before suspend.
 */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	int pos;
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = get_irq_msi(dev->irq);
	pos = entry->msi_attrib.pos;

	/* reprogram with MSI disabled, then re-enable in one write below */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	write_msi_msg(dev->irq, &entry->msg);
	if (entry->msi_attrib.maskbit)
		msi_set_mask_bit(dev->irq, entry->msi_attrib.masked);

	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
	control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
	/* NOTE(review): this leaves MSI disabled when the device lacks a
	 * mask bit and the vector was logically masked (masking is
	 * emulated via the enable bit in that case) — confirm this is
	 * intended; mainline re-enables unconditionally here. */
	if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
258
/*
 * Restore a device's MSI-X configuration after power transition:
 * replay every vector's cached message and mask state, then re-enable
 * MSI-X.  No-op unless MSI-X was enabled on the device before suspend.
 */
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	int pos;
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;

	/* reprogram the table with MSI-X disabled */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 0);

	list_for_each_entry(entry, &dev->msi_list, list) {
		write_msi_msg(entry->irq, &entry->msg);
		msi_set_mask_bit(entry->irq, entry->msi_attrib.masked);
	}

	/* all entries share one capability; take pos from the first */
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	pos = entry->msi_attrib.pos;
	pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_MASKALL;
	control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
285
/* Restore both MSI and MSI-X state; each helper no-ops if not enabled. */
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
291#endif
292
293
294
295
296
297
298
299
300
301
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Set up the MSI capability structure of the device function with a
 * single MSI irq (entry 0), regardless of how many messages the
 * function could handle.  Returns 0 on success or a negative errno.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	int pos, ret;
	u16 control;

	msi_set_enable(dev, 0);	/* disable MSI while we set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);

	/* MSI Entry Initialization */
	entry = alloc_msi_entry();
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.is_64 = is_64bit_address(control);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.masked = 1;
	entry->msi_attrib.default_irq = dev->irq;	/* save INTx irq */
	entry->msi_attrib.pos = pos;
	if (is_mask_bit_support(control)) {
		/* for MSI, ->mask_base stores the config-space offset of
		 * the mask register, not an ioremap address */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	entry->dev = dev;
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;

		/* All MSIs are unmasked by default; mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI);
	if (ret) {
		msi_free_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	 */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}
359
360
361
362
363
364
365
366
367
368
369
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Set up the MSI-X capability structure of the device function with the
 * requested table entries and allocate one irq per entry.  On success,
 * each entries[i].vector is filled with the assigned irq number and 0
 * is returned; otherwise a negative errno, or the number of irqs that
 * could be set up if the arch ran out part-way.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int pos, i, j, nr_entries, ret;
	unsigned long phys_addr;
	u32 table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	msix_set_enable(dev, 0);	/* disable MSI-X while we set it up */

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);

	pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
	phys_addr = pci_resource_start (dev, bir) + table_offset;
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;	/* arch setup below runs with fewer entries */

		j = entries[i].entry;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.masked = 1;
		entry->msi_attrib.default_irq = dev->irq;
		entry->msi_attrib.pos = pos;
		entry->dev = dev;
		entry->mask_base = base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret) {
		/* count how many irqs the arch did manage to assign */
		int avail = 0;
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0) {
				avail++;
			}
		}

		msi_free_irqs(dev);

		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up; otherwise propagate the
		 * arch error code.
		 */
		if (avail == 0)
			avail = ret;
		return avail;
	}

	/* report assigned irqs back to the caller's entries[] */
	i = 0;
	list_for_each_entry(entry, &dev->msi_list, list) {
		entries[i].vector = entry->irq;
		set_irq_msi(entry->irq, entry);
		i++;
	}

	/* Set MSI-X enabled bits */
	pci_intx_for_msi(dev, 0);
	msix_set_enable(dev, 1);
	dev->msix_enabled = 1;

	return 0;
}
448
449
450
451
452
453
454
455
456
457
458
459static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
460{
461 struct pci_bus *bus;
462 int ret;
463
464
465 if (!pci_msi_enable || !dev || dev->no_msi)
466 return -EINVAL;
467
468
469
470
471
472
473 if (nvec < 1)
474 return -ERANGE;
475
476
477
478
479
480
481
482 for (bus = dev->bus; bus; bus = bus->parent)
483 if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
484 return -EINVAL;
485
486 ret = arch_msi_check_device(dev, nvec, type);
487 if (ret)
488 return ret;
489
490 if (!pci_find_capability(dev, type))
491 return -EINVAL;
492
493 return 0;
494}
495
496
497
498
499
500
501
502
503
504
505
506int pci_enable_msi(struct pci_dev* dev)
507{
508 int status;
509
510 status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI);
511 if (status)
512 return status;
513
514 WARN_ON(!!dev->msi_enabled);
515
516
517 if (dev->msix_enabled) {
518 printk(KERN_INFO "PCI: %s: Can't enable MSI. "
519 "Device already has MSI-X enabled\n",
520 pci_name(dev));
521 return -EINVAL;
522 }
523 status = msi_capability_init(dev);
524 return status;
525}
526EXPORT_SYMBOL(pci_enable_msi);
527
/**
 * pci_disable_msi - shut down MSI on a device and restore INTx
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Disables the MSI capability, frees the MSI irq, and restores
 * dev->irq to the INTx irq saved at enable time.  No-op if MSI is not
 * currently enabled.
 **/
void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int default_irq;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	BUG_ON(list_empty(&dev->msi_list));
	entry = list_entry(dev->msi_list.next, struct msi_desc, list);
	/* sanity check: the first entry must really be an MSI descriptor */
	if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		return;
	}

	default_irq = entry->msi_attrib.default_irq;
	msi_free_irqs(dev);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = default_irq;
}
EXPORT_SYMBOL(pci_disable_msi);
553
/*
 * Free every msi_desc attached to @dev, tearing down the arch irqs
 * first.  MSI-X vectors are masked in the table before freeing, and
 * the ioremap()ed table (shared via ->mask_base by all entries) is
 * unmapped when the last entry is reached.  Callers must have removed
 * all irq actions beforehand (enforced by BUG_ON).
 */
static int msi_free_irqs(struct pci_dev* dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq)
			BUG_ON(irq_has_action(entry->irq));
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) {
			/* mask the vector before it goes away */
			writel(1, entry->mask_base + entry->msi_attrib.entry_nr
				  * PCI_MSIX_ENTRY_SIZE
				  + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

			/* all entries share one mapping; unmap it once */
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}
		list_del(&entry->list);
		kfree(entry);
	}

	return 0;
}
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
597{
598 int status, pos, nr_entries;
599 int i, j;
600 u16 control;
601
602 if (!entries)
603 return -EINVAL;
604
605 status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
606 if (status)
607 return status;
608
609 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
610 pci_read_config_word(dev, msi_control_reg(pos), &control);
611 nr_entries = multi_msix_capable(control);
612 if (nvec > nr_entries)
613 return -EINVAL;
614
615
616 for (i = 0; i < nvec; i++) {
617 if (entries[i].entry >= nr_entries)
618 return -EINVAL;
619 for (j = i + 1; j < nvec; j++) {
620 if (entries[i].entry == entries[j].entry)
621 return -EINVAL;
622 }
623 }
624 WARN_ON(!!dev->msix_enabled);
625
626
627 if (dev->msi_enabled) {
628 printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
629 "Device already has an MSI irq assigned\n",
630 pci_name(dev));
631 return -EINVAL;
632 }
633 status = msix_capability_init(dev, entries, nvec);
634 return status;
635}
636EXPORT_SYMBOL(pci_enable_msix);
637
/* Free all MSI-X irqs; msi_free_irqs() handles MSI-X specifics itself. */
static void msix_free_all_irqs(struct pci_dev *dev)
{
	msi_free_irqs(dev);
}
642
/**
 * pci_disable_msix - shut down MSI-X on a device and restore INTx
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * Disables the MSI-X capability and frees all MSI-X irqs.  No-op if
 * MSI-X is not currently enabled.
 **/
void pci_disable_msix(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	msix_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;

	msix_free_all_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);
655
656
657
658
659
660
661
662
663
664
/**
 * msi_remove_pci_irq_vectors - reclaim MSI/MSI-X irqs from a device
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Frees all irq resources still assigned to @dev; called when a device
 * is being removed without the driver having disabled MSI/MSI-X first.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	if (!pci_msi_enable || !dev)
		return;

	if (dev->msi_enabled)
		msi_free_irqs(dev);

	if (dev->msix_enabled)
		msix_free_all_irqs(dev);
}
676
/* Globally disable MSI support (e.g. from a kernel command-line option). */
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
681
/* Per-device MSI setup at enumeration time: prepare the msi_desc list. */
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
	INIT_LIST_HEAD(&dev->msi_list);
}
686
687
688
689
/*
 * Weak default: architectures with restrictions on which devices may
 * use MSI override this; the generic version accepts every device.
 */
int __attribute__ ((weak))
arch_msi_check_device(struct pci_dev* dev, int nvec, int type)
{
	return 0;
}
695
/*
 * Weak default: architectures override this to allocate an irq and
 * program the MSI message for one msi_desc.  The stub does nothing
 * and reports success.
 */
int __attribute__ ((weak))
arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry)
{
	return 0;
}
701
/*
 * Weak default: set up every msi_desc on the device's list by calling
 * arch_setup_msi_irq() on each, stopping at the first failure (whose
 * error code is returned; already-assigned irqs are left for the
 * caller to clean up via msi_free_irqs()).
 */
int __attribute__ ((weak))
arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret)
			return ret;
	}

	return 0;
}
716
/*
 * Weak default: architectures that allocate per-irq resources override
 * this to release them; the generic stub has nothing to tear down.
 * (The redundant bare `return;` at the end of the void body was removed.)
 */
void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq)
{
}
721
722void __attribute__ ((weak))
723arch_teardown_msi_irqs(struct pci_dev *dev)
724{
725 struct msi_desc *entry;
726
727 list_for_each_entry(entry, &dev->msi_list, list) {
728 if (entry->irq != 0)
729 arch_teardown_msi_irq(entry->irq);
730 }
731}
732