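/*
 * PCI Message Signaled Interrupt (MSI) support.
 *
 * Helpers for enabling, disabling, masking and restoring MSI and MSI-X
 * interrupts on PCI devices, plus the PCI/MSI irqdomain glue.
 */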

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>

#include "pci.h"

static int pci_msi_enable = 1;
int pci_msi_ignore_mask;

#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static struct irq_domain *pci_msi_default_domain;
static DEFINE_MUTEX(pci_msi_domain_lock);

struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
{
	return pci_msi_default_domain;
}

static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain)
		return domain;

	return arch_get_pci_msi_domain(dev);
}

static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain;

	domain = pci_msi_get_domain(dev);
	if (domain)
		return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);

	return arch_setup_msi_irqs(dev, nvec, type);
}

static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct irq_domain *domain;

	domain = pci_msi_get_domain(dev);
	if (domain)
		pci_msi_domain_free_irqs(domain, dev);
	else
		arch_teardown_msi_irqs(dev);
}
#else
#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
#endif
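
/* Arch hooks - weak defaults, may be overridden by the architecture */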
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_controller *chip = dev->bus->msi;
	int err;

	if (!chip || !chip->setup_irq)
		return -EINVAL;

	err = chip->setup_irq(chip, dev, desc);
	if (err < 0)
		return err;

	irq_set_chip_data(desc->irq, chip);

	return 0;
}

void __weak arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_controller *chip = irq_get_chip_data(irq);

	if (!chip || !chip->teardown_irq)
		return;

	chip->teardown_irq(chip, irq);
}

int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs().
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(entry, dev) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}
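
/*
 * Default teardown: free every vector of each MSI entry via
 * arch_teardown_msi_irq().  Kept as a separate non-weak function so it
 * can be called directly as well as from arch_teardown_msi_irqs().
 */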
void default_teardown_msi_irqs(struct pci_dev *dev)
{
	int i;
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				arch_teardown_msi_irq(entry->irq + i);
}

void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
	return default_teardown_msi_irqs(dev);
}

static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;

	entry = NULL;
	if (dev->msix_enabled) {
		for_each_pci_msi_entry(entry, dev) {
			if (irq == entry->irq)
				break;
		}
	} else if (dev->msi_enabled) {
		entry = irq_get_msi_desc(irq);
	}

	if (entry)
		__pci_write_msi_msg(entry, &entry->msg);
}

void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	return default_restore_msi_irqs(dev);
}

static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}
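
/**
 * __pci_msi_desc_mask_irq - update the MSI per-vector mask bits
 * @desc:	MSI descriptor for the device
 * @mask:	bitmask of vectors whose mask bits should be updated
 * @flag:	new mask bit values for the vectors selected by @mask
 *
 * Writes the updated mask into the MSI capability's mask register and
 * returns the new mask state.  Returns 0 without touching the hardware
 * when per-vector masking is unsupported or globally disabled via
 * pci_msi_ignore_mask.
 */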
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
			       mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}
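
/**
 * __pci_msix_desc_mask_irq - set or clear the mask bit of an MSI-X vector
 * @desc:	MSI-X descriptor for the vector
 * @flag:	non-zero to mask the vector, zero to unmask it
 *
 * Updates the Vector Control word in the MSI-X table and returns the new
 * mask state.  Returns 0 without touching the hardware when masking is
 * globally disabled via pci_msi_ignore_mask.
 */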
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;
	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (pci_msi_ignore_mask)
		return 0;

	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	writel(mask_bits, desc->mask_base + offset);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}

static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* flush the posted mask write */
	} else {
		unsigned offset = data->irq - desc->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}

/**
 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}

/**
 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}

void default_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		default_restore_msi_irq(dev, entry->irq);
}

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	BUG_ON(dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 msgctl;

		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
	}
	entry->msg = *msg;
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);

static void free_msi_irqs(struct pci_dev *dev)
{
	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
	struct msi_desc *entry, *tmp;
	struct attribute **msi_attrs;
	struct device_attribute *dev_attr;
	int i, count = 0;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				BUG_ON(irq_has_action(entry->irq + i));

	pci_msi_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			if (list_is_last(&entry->list, msi_list))
				iounmap(entry->mask_base);
		}

		list_del(&entry->list);
		kfree(entry);
	}

	if (dev->msi_irq_groups) {
		sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
		msi_attrs = dev->msi_irq_groups[0]->attrs;
		while (msi_attrs[count]) {
			dev_attr = container_of(msi_attrs[count],
						struct device_attribute, attr);
			kfree(dev_attr->attr.name);
			kfree(dev_attr);
			++count;
		}
		kfree(msi_attrs);
		kfree(dev->msi_irq_groups[0]);
		kfree(dev->msi_irq_groups);
		dev->msi_irq_groups = NULL;
	}
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	arch_restore_msi_irqs(dev);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
		     entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

	/* route the table */
	pci_intx_for_msi(dev, 0);
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	arch_restore_msi_irqs(dev);
	for_each_pci_msi_entry(entry, dev)
		msix_mask_irq(entry, entry->masked);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct msi_desc *entry;
	unsigned long irq;
	int retval;

	retval = kstrtoul(attr->attr.name, 10, &irq);
	if (retval)
		return retval;

	entry = irq_get_msi_desc(irq);
	if (entry)
		return sprintf(buf, "%s\n",
				entry->msi_attrib.is_msix ? "msix" : "msi");

	return -ENODEV;
}

static int populate_msi_sysfs(struct pci_dev *pdev)
{
	struct attribute **msi_attrs;
	struct attribute *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	const struct attribute_group **msi_irq_groups;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;

	/* Determine how many msi entries we have */
	for_each_pci_msi_entry(entry, pdev)
		++num_msi;
	if (!num_msi)
		return 0;

	/* Dynamically create the MSI attributes for the PCI device */
	msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
	if (!msi_attrs)
		return -ENOMEM;
	for_each_pci_msi_entry(entry, pdev) {
		msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
		if (!msi_dev_attr)
			goto error_attrs;
		msi_attrs[count] = &msi_dev_attr->attr;

		sysfs_attr_init(&msi_dev_attr->attr);
		msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
						    entry->irq);
		if (!msi_dev_attr->attr.name)
			goto error_attrs;
		msi_dev_attr->attr.mode = S_IRUGO;
		msi_dev_attr->show = msi_mode_show;
		++count;
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;
	pdev->msi_irq_groups = msi_irq_groups;

	return 0;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ret;
}

static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
{
	u16 control;
	struct msi_desc *entry;

	/* MSI Entry Initialization */
	entry = alloc_msi_entry(&dev->dev);
	if (!entry)
		return NULL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	entry->msi_attrib.is_msix = 0;
	entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
	entry->msi_attrib.default_irq = dev->irq;
	entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
	entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
	entry->nvec_used = nvec;

	if (control & PCI_MSI_FLAGS_64BIT)
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);

	return entry;
}

static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev) {
		if (!dev->no_64bit_msi || !entry->msg.address_hi)
			continue;
		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
			" tried to assign one above 4G\n");
		return -EIO;
	}
	return 0;
}
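
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev:	pointer to the pci_dev data structure of MSI device function
 * @nvec:	number of interrupts to allocate
 *
 * Sets up the MSI capability structure of the device, allocates @nvec
 * interrupts and updates dev->irq with the new base vector.  Returns 0 on
 * success or a negative errno on failure; a positive return value is the
 * number of vectors the setup backend could allocate instead.
 */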
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
	struct msi_desc *entry;
	int ret;
	unsigned mask;

	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */

	entry = msi_setup_entry(dev, nvec);
	if (!entry)
		return -ENOMEM;

	/* All MSIs are unmasked by default; mask them all */
	mask = msi_mask(entry->msi_attrib.multi_cap);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = msi_verify_entries(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = populate_msi_sysfs(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	return 0;
}

static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	unsigned long flags;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	flags = pci_resource_flags(dev, bir);
	if (!flags || (flags & IORESOURCE_UNSET))
		return NULL;

	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
			      struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int i;

	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry(&dev->dev);
		if (!entry) {
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* Not enough memory. Don't try again. */
			return -ENOMEM;
		}

		entry->msi_attrib.is_msix = 1;
		entry->msi_attrib.is_64 = 1;
		entry->msi_attrib.entry_nr = entries[i].entry;
		entry->msi_attrib.default_irq = dev->irq;
		entry->mask_base = base;
		entry->nvec_used = 1;

		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
	}

	return 0;
}

static void msix_program_entries(struct pci_dev *dev,
				 struct msix_entry *entries)
{
	struct msi_desc *entry;
	int i = 0;

	for_each_pci_msi_entry(entry, dev) {
		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL;

		entries[i].vector = entry->irq;
		entry->masked = readl(entry->mask_base + offset);
		msix_mask_irq(entry, 1);
		i++;
	}
}
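
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev:	pointer to the pci_dev data structure of MSI-X device function
 * @entries:	pointer to an array of struct msix_entry entries
 * @nvec:	number of @entries
 *
 * Sets up the MSI-X capability structure of the device function, maps the
 * MSI-X table and requests @nvec interrupts.  Returns 0 on success or a
 * negative errno on failure; a positive return value reports how many
 * vectors could be set up when the full request could not be satisfied.
 */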
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	int ret;
	u16 control;
	void __iomem *base;

	/* Ensure MSI-X is disabled while it is set up */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);

	/* Request & Map MSI-X table region */
	base = msix_map_region(dev, msix_table_size(control));
	if (!base)
		return -ENOMEM;

	ret = msix_setup_entries(dev, base, entries, nvec);
	if (ret)
		return ret;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto out_avail;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		goto out_free;

	/*
	 * Some devices require MSI-X to be enabled before we can touch the
	 * MSI-X registers.  We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

	msix_program_entries(dev, entries);

	ret = populate_msi_sysfs(dev);
	if (ret)
		goto out_free;

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_avail:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

out_free:
	free_msi_irqs(dev);

	return ret;
}
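
/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev:	pointer to the pci_dev data structure of MSI device function
 * @nvec:	how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses to
 * determine if MSI/-X is supported for the device.  If MSI/-X is
 * supported return 1, else return 0.
 */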
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable)
		return 0;

	if (!dev || dev->no_msi || dev->current_state != PCI_D0)
		return 0;

	/*
	 * You can't ask for less than one vector; the MSI list
	 * manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return 0;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.  We expect only arch-specific PCI host
	 * bus controller drivers or quirks for specific PCI bridges to
	 * be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return 0;

	return 1;
}
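
/**
 * pci_msi_vec_count - return the number of device's MSI vectors
 * @dev:	device to report about
 *
 * Returns the number of MSI vectors the device is capable of, as encoded
 * in the Multiple Message Capable field of its MSI capability, or -EINVAL
 * if the device has no MSI capability.  The returned value is always a
 * power of two.
 */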
int pci_msi_vec_count(struct pci_dev *dev)
{
	int ret;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

	return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);

void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
	desc = first_pci_msi_entry(dev);

	pci_msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked as initial state */
	mask = msi_mask(desc->msi_attrib.multi_cap);
	/* Keep cached state to be restored */
	__pci_msi_desc_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion IRQ */
	dev->irq = desc->msi_attrib.default_irq;
	pcibios_alloc_irq(dev);
}

void pci_disable_msi(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);
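
/**
 * pci_msix_vec_count - return the number of device's MSI-X table entries
 * @dev:	pointer to the pci_dev data structure of MSI-X device function
 *
 * Returns the number of MSI-X table entries the device supports
 * (i.e. the MSI-X table size), or -EINVAL if the device has no MSI-X
 * capability.
 */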
int pci_msix_vec_count(struct pci_dev *dev)
{
	u16 control;

	if (!dev->msix_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	return msix_table_size(control);
}
EXPORT_SYMBOL(pci_msix_vec_count);
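
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev:	pointer to the pci_dev data structure of MSI-X device function
 * @entries:	pointer to an array of MSI-X entries to populate
 * @nvec:	number of MSI-X irqs requested
 *
 * Sets up the MSI-X capability and allocates the requested irqs.  Returns
 * 0 on success and a negative errno on failure.  A positive return value
 * indicates that fewer vectors are available than requested; the caller
 * may retry with that smaller count.
 */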
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	int nr_entries;
	int i, j;

	if (!pci_msi_supported(dev, nvec))
		return -EINVAL;

	if (!entries)
		return -EINVAL;

	nr_entries = pci_msix_vec_count(dev);
	if (nr_entries < 0)
		return nr_entries;
	if (nvec > nr_entries)
		return nr_entries;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	return msix_capability_init(dev, entries, nvec);
}
EXPORT_SYMBOL(pci_enable_msix);

void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	/* Return the device with MSI-X masked as initial state */
	for_each_pci_msi_entry(entry, dev) {
		/* Keep cached states to be restored */
		__pci_msix_desc_mask_irq(entry, 1);
	}

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}
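
/**
 * pci_msi_enabled - is MSI enabled system-wide?
 *
 * Returns true if MSI has not been disabled globally (e.g. via
 * pci_no_msi()).
 */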
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

void pci_msi_init_pci_dev(struct pci_dev *dev)
{
}
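
/**
 * pci_enable_msi_range - configure device's MSI capability structure
 * @dev:	device to configure
 * @minvec:	minimal number of interrupts to configure
 * @maxvec:	maximum number of interrupts to configure
 *
 * This function tries to allocate the maximum possible number of interrupts
 * in the range between @minvec and @maxvec.  It returns a negative errno if
 * an error occurs.  On success it returns the actual number of interrupts
 * allocated and updates dev->irq to the lowest new interrupt number; the
 * other interrupt numbers allocated to this device are consecutive.
 */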
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
	int nvec;
	int rc;

	if (!pci_msi_supported(dev, minvec))
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev,
			 "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	}

	if (maxvec < minvec)
		return -ERANGE;

	nvec = pci_msi_vec_count(dev);
	if (nvec < 0)
		return nvec;
	else if (nvec < minvec)
		return -EINVAL;
	else if (nvec > maxvec)
		nvec = maxvec;

	do {
		rc = msi_capability_init(dev, nvec);
		if (rc < 0) {
			return rc;
		} else if (rc > 0) {
			if (rc < minvec)
				return -ENOSPC;
			nvec = rc;
		}
	} while (rc);

	return nvec;
}
EXPORT_SYMBOL(pci_enable_msi_range);
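
/**
 * pci_enable_msix_range - configure device's MSI-X capability structure
 * @dev:	pointer to the pci_dev data structure of MSI-X device function
 * @entries:	pointer to an array of MSI-X entries
 * @minvec:	minimum number of MSI-X irqs requested
 * @maxvec:	maximum number of MSI-X irqs requested
 *
 * Tries to allocate as many MSI-X interrupts as possible within the range
 * @minvec..@maxvec by retrying pci_enable_msix() with progressively smaller
 * counts.  Returns the number of interrupts allocated on success, or a
 * negative errno (including -ENOSPC if fewer than @minvec are available).
 */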
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
			  int minvec, int maxvec)
{
	int nvec = maxvec;
	int rc;

	if (maxvec < minvec)
		return -ERANGE;

	do {
		rc = pci_enable_msix(dev, entries, nvec);
		if (rc < 0) {
			return rc;
		} else if (rc > 0) {
			if (rc < minvec)
				return -ENOSPC;
			nvec = rc;
		}
	} while (rc);

	return nvec;
}
EXPORT_SYMBOL(pci_enable_msix_range);

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
	return to_pci_dev(desc->dev);
}

void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(desc);

	return dev->bus->sysdata;
}
EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
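
/**
 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
 * @irq_data:	Pointer to interrupt data of the MSI interrupt
 * @msg:	Pointer to the message
 */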
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);

	/*
	 * For MSI-X desc->irq is always equal to irq_data->irq. For
	 * MSI only the first interrupt of MULTI MSI passes the test.
	 */
	if (desc->irq == irq_data->irq)
		__pci_write_msi_msg(desc, msg);
}
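
/**
 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
 * @dev:	Pointer to the PCI device
 * @desc:	Pointer to the MSI descriptor
 *
 * The ID number is only used within the irqdomain.
 */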
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc)
{
	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}

static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
{
	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
}
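
/**
 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
 *			      required by @dev
 * @domain:	The interrupt domain to check
 * @info:	The domain info for verification
 * @dev:	The device to check
 *
 * Returns:
 *  0 if the functionality is supported
 *  1 if Multi MSI is requested, but the domain does not support it
 *  -ENOTSUPP otherwise
 */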
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev)
{
	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));

	/* Special handling to support pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) &&
	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
		return 1;
	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
		return -ENOTSUPP;

	return 0;
}

static int pci_msi_domain_handle_error(struct irq_domain *domain,
				       struct msi_desc *desc, int error)
{
	/* Special handling to support pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
		return 1;

	return error;
}

#ifdef GENERIC_MSI_DOMAIN_OPS
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
					       desc);
}
#else
#define pci_msi_domain_set_desc		NULL
#endif

static struct msi_domain_ops pci_msi_domain_ops_default = {
	.set_desc	= pci_msi_domain_set_desc,
	.msi_check	= pci_msi_domain_check_cap,
	.handle_error	= pci_msi_domain_handle_error,
};

static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &pci_msi_domain_ops_default;
	} else {
		if (ops->set_desc == NULL)
			ops->set_desc = pci_msi_domain_set_desc;
		if (ops->msi_check == NULL)
			ops->msi_check = pci_msi_domain_check_cap;
		if (ops->handle_error == NULL)
			ops->handle_error = pci_msi_domain_handle_error;
	}
}

static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
	if (!chip->irq_mask)
		chip->irq_mask = pci_msi_mask_irq;
	if (!chip->irq_unmask)
		chip->irq_unmask = pci_msi_unmask_irq;
}
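
/**
 * pci_msi_create_irq_domain - Create a MSI interrupt domain
 * @node:	Optional device-tree node of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops as requested by @info->flags and creates
 * a PCI/MSI interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */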
struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
					     struct msi_domain_info *info,
					     struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		pci_msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		pci_msi_domain_update_chip_ops(info);

	domain = msi_create_irq_domain(node, info, parent);
	if (!domain)
		return NULL;

	domain->bus_token = DOMAIN_BUS_PCI_MSI;
	return domain;
}
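
/**
 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
 * @domain:	The interrupt domain to allocate from
 * @dev:	The device for which to allocate
 * @nvec:	The number of interrupts to allocate
 * @type:	Unused, kept to allow simpler migration from the arch_XXX
 *		interfaces
 *
 * Returns the result of msi_domain_alloc_irqs(): zero on success or a
 * negative error code on failure.
 */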
int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
			      int nvec, int type)
{
	return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
}
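
/**
 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
 * @domain:	The interrupt domain
 * @dev:	The device for which to free interrupts
 */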
void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
{
	msi_domain_free_irqs(domain, &dev->dev);
}
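
/**
 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
 * @node:	Optional device-tree node of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Returns: A domain pointer or NULL in case of failure.  If successful,
 * the default PCI/MSI irqdomain pointer is updated.
 */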
struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
		struct msi_domain_info *info, struct irq_domain *parent)
{
	struct irq_domain *domain;

	mutex_lock(&pci_msi_domain_lock);
	if (pci_msi_default_domain) {
		pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
		domain = NULL;
	} else {
		domain = pci_msi_create_irq_domain(node, info, parent);
		pci_msi_default_domain = domain;
	}
	mutex_unlock(&pci_msi_domain_lock);

	return domain;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */