// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 * Copyright (C) 2016 Christoph Hellwig.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/acpi_iort.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include "pci.h"

static int pci_msi_enable = 1;
int pci_msi_ignore_mask;

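/*
 * The MSI-X Table Size field in the Message Control register is encoded
 * as (number of entries - 1), hence the "+ 1" below.
 */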
#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		return msi_domain_alloc_irqs(domain, &dev->dev, nvec);

	return arch_setup_msi_irqs(dev, nvec, type);
}

static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		msi_domain_free_irqs(domain, &dev->dev);
	else
		arch_teardown_msi_irqs(dev);
}
#else
#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
#endif

/* Arch hooks */

int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_controller *chip = dev->bus->msi;
	int err;

	if (!chip || !chip->setup_irq)
		return -EINVAL;

	err = chip->setup_irq(chip, dev, desc);
	if (err < 0)
		return err;

	irq_set_chip_data(desc->irq, chip);

	return 0;
}

void __weak arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_controller *chip = irq_get_chip_data(irq);

	if (!chip || !chip->teardown_irq)
		return;

	chip->teardown_irq(chip, irq);
}

int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_controller *chip = dev->bus->msi;
	struct msi_desc *entry;
	int ret;

	if (chip && chip->setup_irqs)
		return chip->setup_irqs(chip, dev, nvec, type);

	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs().
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(entry, dev) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}

/*
 * We have a default implementation available as a separate non-weak
 * function, as it is used by the Xen x86 PCI code.
 */
void default_teardown_msi_irqs(struct pci_dev *dev)
{
	int i;
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				arch_teardown_msi_irq(entry->irq + i);
}

void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
	return default_teardown_msi_irqs(dev);
}

static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;

	entry = NULL;
	if (dev->msix_enabled) {
		for_each_pci_msi_entry(entry, dev) {
			if (irq == entry->irq)
				break;
		}
	} else if (dev->msi_enabled) {
		entry = irq_get_msi_desc(irq);
	}

	if (entry)
		__pci_write_msi_msg(entry, &entry->msg);
}

void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	return default_restore_msi_irqs(dev);
}

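/*
 * The Multiple Message Capable field encodes log2 of the number of
 * vectors a function may request, so a capability value of x corresponds
 * to a mask covering 2^x per-vector mask bits.
 */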
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
			       mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}

static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	return desc->mask_base +
		desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
}

/*
 * This internal function does not flush PCI writes to the device.  All
 * users must ensure that they read from the device before either assuming
 * that the device state is up to date, or returning out of this file.
 * This saves a few milliseconds when initialising devices with lots of
 * MSI-X interrupts.
 */
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (pci_msi_ignore_mask)
		return 0;

	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}

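/*
 * Common irq-chip masking path: MSI-X entries are masked individually
 * via their Vector Control register, while multi-MSI shares a single
 * mask register, so the per-vector bit is derived from the offset of
 * the Linux IRQ number within the block.
 */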
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* Flush write to device */
	} else {
		unsigned offset = data->irq - desc->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}

/**
 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);

/**
 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);

void default_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		default_restore_msi_irq(dev, entry->irq);
}

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	BUG_ON(dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 msgctl;

		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
	}
	entry->msg = *msg;
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);

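/*
 * Release everything set up for MSI/MSI-X on this device: the IRQs
 * themselves, the descriptors, the MSI-X table mapping (shared by all
 * entries, so unmapped only once, on the last entry) and the sysfs
 * attributes created by populate_msi_sysfs().
 */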
static void free_msi_irqs(struct pci_dev *dev)
{
	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
	struct msi_desc *entry, *tmp;
	struct attribute **msi_attrs;
	struct device_attribute *dev_attr;
	int i, count = 0;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				BUG_ON(irq_has_action(entry->irq + i));

	pci_msi_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			if (list_is_last(&entry->list, msi_list))
				iounmap(entry->mask_base);
		}

		list_del(&entry->list);
		free_msi_entry(entry);
	}

	if (dev->msi_irq_groups) {
		sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
		msi_attrs = dev->msi_irq_groups[0]->attrs;
		while (msi_attrs[count]) {
			dev_attr = container_of(msi_attrs[count],
						struct device_attribute, attr);
			kfree(dev_attr->attr.name);
			kfree(dev_attr);
			++count;
		}
		kfree(msi_attrs);
		kfree(dev->msi_irq_groups[0]);
		kfree(dev->msi_irq_groups);
		dev->msi_irq_groups = NULL;
	}
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	arch_restore_msi_irqs(dev);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
		     entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

	/* route the table */
	pci_intx_for_msi(dev, 0);
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	arch_restore_msi_irqs(dev);
	for_each_pci_msi_entry(entry, dev)
		msix_mask_irq(entry, entry->masked);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct msi_desc *entry;
	unsigned long irq;
	int retval;

	retval = kstrtoul(attr->attr.name, 10, &irq);
	if (retval)
		return retval;

	entry = irq_get_msi_desc(irq);
	if (entry)
		return sprintf(buf, "%s\n",
			       entry->msi_attrib.is_msix ? "msix" : "msi");

	return -ENODEV;
}

static int populate_msi_sysfs(struct pci_dev *pdev)
{
	struct attribute **msi_attrs;
	struct attribute *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	const struct attribute_group **msi_irq_groups;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;
	int i;

	/* Determine how many msi entries we have */
	for_each_pci_msi_entry(entry, pdev)
		num_msi += entry->nvec_used;
	if (!num_msi)
		return 0;

	/* Dynamically create the MSI attributes for the PCI device */
	msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
	if (!msi_attrs)
		return -ENOMEM;
	for_each_pci_msi_entry(entry, pdev) {
		for (i = 0; i < entry->nvec_used; i++) {
			msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
			if (!msi_dev_attr)
				goto error_attrs;
			msi_attrs[count] = &msi_dev_attr->attr;

			sysfs_attr_init(&msi_dev_attr->attr);
			msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
							    entry->irq + i);
			if (!msi_dev_attr->attr.name)
				goto error_attrs;
			msi_dev_attr->attr.mode = S_IRUGO;
			msi_dev_attr->show = msi_mode_show;
			++count;
		}
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;
	pdev->msi_irq_groups = msi_irq_groups;

	return 0;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ret;
}

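/*
 * Build the single MSI descriptor that covers all @nvec vectors of a
 * device, capturing the capability bits (64-bit addressing, per-vector
 * masking, multiple-message support) needed to program it later.
 */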
static struct msi_desc *
msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
	struct cpumask *masks = NULL;
	struct msi_desc *entry;
	u16 control;

	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	/* MSI Entry Initialization */
	entry = alloc_msi_entry(&dev->dev, nvec, masks);
	if (!entry)
		goto out;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	entry->msi_attrib.is_msix	= 0;
	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
	entry->msi_attrib.entry_nr	= 0;
	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));

	if (control & PCI_MSI_FLAGS_64BIT)
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);

out:
	kfree(masks);
	return entry;
}

static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev) {
		if (!dev->no_64bit_msi || !entry->msg.address_hi)
			continue;
		pci_err(dev, "Device has broken 64-bit MSI but arch tried to assign one above 4G\n");
		return -EIO;
	}
	return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 * @affd: description of automatic irq affinity assignments (may be %NULL)
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI irq.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec,
			       const struct irq_affinity *affd)
{
	struct msi_desc *entry;
	int ret;
	unsigned mask;

	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */

	entry = msi_setup_entry(dev, nvec, affd);
	if (!entry)
		return -ENOMEM;

	/* All MSIs are unmasked by default; mask them all */
	mask = msi_mask(entry->msi_attrib.multi_cap);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = msi_verify_entries(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = populate_msi_sysfs(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	return 0;
}

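/*
 * Map the MSI-X table: the Table Offset/BIR register tells us which
 * BAR the table lives in and its offset within that BAR.
 */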
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	unsigned long flags;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	flags = pci_resource_flags(dev, bir);
	if (!flags || (flags & IORESOURCE_UNSET))
		return NULL;

	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

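/*
 * Allocate one MSI descriptor per requested vector. If @entries is
 * NULL the table slots are assigned sequentially; otherwise the
 * caller-supplied entry numbers are honoured.
 */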
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
			      struct msix_entry *entries, int nvec,
			      const struct irq_affinity *affd)
{
	struct cpumask *curmsk, *masks = NULL;
	struct msi_desc *entry;
	int ret, i;

	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	for (i = 0, curmsk = masks; i < nvec; i++) {
		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
		if (!entry) {
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* Not enough memory, don't try again */
			ret = -ENOMEM;
			goto out;
		}

		entry->msi_attrib.is_msix	= 1;
		entry->msi_attrib.is_64		= 1;
		if (entries)
			entry->msi_attrib.entry_nr = entries[i].entry;
		else
			entry->msi_attrib.entry_nr = i;
		entry->msi_attrib.default_irq	= dev->irq;
		entry->mask_base		= base;

		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
		if (masks)
			curmsk++;
	}
	ret = 0;
out:
	kfree(masks);
	return ret;
}

static void msix_program_entries(struct pci_dev *dev,
				 struct msix_entry *entries)
{
	struct msi_desc *entry;
	int i = 0;

	for_each_pci_msi_entry(entry, dev) {
		if (entries)
			entries[i++].vector = entry->irq;
		entry->masked = readl(pci_msix_desc_addr(entry) +
				      PCI_MSIX_ENTRY_VECTOR_CTRL);
		msix_mask_irq(entry, 1);
	}
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 * @affd: Optional pointer to enable automatic affinity assignment
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, const struct irq_affinity *affd)
{
	int ret;
	u16 control;
	void __iomem *base;

	/* Ensure MSI-X is disabled while it is set up */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	base = msix_map_region(dev, msix_table_size(control));
	if (!base)
		return -ENOMEM;

	ret = msix_setup_entries(dev, base, entries, nvec, affd);
	if (ret)
		return ret;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto out_avail;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		goto out_free;

	/*
	 * Some devices require MSI-X to be enabled before we can touch the
	 * MSI-X registers. We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

	msix_program_entries(dev, entries);

	ret = populate_msi_sysfs(dev);
	if (ret)
		goto out_free;

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_avail:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

out_free:
	free_msi_irqs(dev);

	return ret;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable)
		return 0;

	if (!dev || dev->no_msi || dev->current_state != PCI_D0)
		return 0;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return 0;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return 0;

	return 1;
}

/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * Multiple Message Capable register. It returns a negative errno if the
 * device is not capable sending MSI interrupts. Otherwise, the call succeeds
 * and returns a power of two, up to a maximum of 2^5 (32), according to the
 * MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
	int ret;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

	return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);

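/*
 * Disable MSI on the device and restore INTx, but keep the descriptors
 * around; pci_disable_msi() frees them afterwards.
 */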
static void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
	desc = first_pci_msi_entry(dev);

	pci_msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked as initial states */
	mask = msi_mask(desc->msi_attrib.multi_cap);
	/* Keep cached state to be restored */
	__pci_msi_desc_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = desc->msi_attrib.default_irq;
	pcibios_alloc_irq(dev);
}

void pci_disable_msi(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/**
 * pci_msix_vec_count - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * This function returns the number of device's MSI-X table entries and
 * therefore the number of MSI-X vectors device is capable of sending.
 * It returns a negative errno if the device is not capable of sending MSI-X
 * interrupts.
 **/
int pci_msix_vec_count(struct pci_dev *dev)
{
	u16 control;

	if (!dev->msix_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	return msix_table_size(control);
}
EXPORT_SYMBOL(pci_msix_vec_count);

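/*
 * Validate the request (vector count, duplicate table entries, no MSI
 * already enabled) before handing off to msix_capability_init().
 */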
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
			     int nvec, const struct irq_affinity *affd)
{
	int nr_entries;
	int i, j;

	if (!pci_msi_supported(dev, nvec))
		return -EINVAL;

	nr_entries = pci_msix_vec_count(dev);
	if (nr_entries < 0)
		return nr_entries;
	if (nvec > nr_entries)
		return nr_entries;

	if (entries) {
		/* Check for any invalid entries */
		for (i = 0; i < nvec; i++) {
			if (entries[i].entry >= nr_entries)
				return -EINVAL;		/* invalid entry */
			for (j = i + 1; j < nvec; j++) {
				if (entries[i].entry == entries[j].entry)
					return -EINVAL;	/* duplicate entry */
			}
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	return msix_capability_init(dev, entries, nvec, affd);
}

static void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	if (pci_dev_is_disconnected(dev)) {
		dev->msix_enabled = 0;
		return;
	}

	/* Return the device with MSI-X masked as initial states */
	for_each_pci_msi_entry(entry, dev) {
		/* Keep cached states to be restored */
		__pci_msix_desc_mask_irq(entry, 1);
	}

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

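/*
 * Try to allocate between @minvec and @maxvec MSI vectors. A positive
 * return from msi_capability_init() reports how many vectors could be
 * set up, so retry with that smaller count until we succeed or drop
 * below @minvec.
 */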
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
				  const struct irq_affinity *affd)
{
	int nvec;
	int rc;

	if (!pci_msi_supported(dev, minvec))
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	}

	if (maxvec < minvec)
		return -ERANGE;

	nvec = pci_msi_vec_count(dev);
	if (nvec < 0)
		return nvec;
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = msi_capability_init(dev, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}

/* deprecated, don't use */
int pci_enable_msi(struct pci_dev *dev)
{
	int rc = __pci_enable_msi_range(dev, 1, 1, NULL);

	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL(pci_enable_msi);

static int __pci_enable_msix_range(struct pci_dev *dev,
				   struct msix_entry *entries, int minvec,
				   int maxvec, const struct irq_affinity *affd)
{
	int rc, nvec = maxvec;

	if (maxvec < minvec)
		return -ERANGE;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = __pci_enable_msix(dev, entries, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}

/**
 * pci_enable_msix_range - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @minvec: minimum number of MSI-X irqs requested
 * @maxvec: maximum number of MSI-X irqs requested
 *
 * Setup the MSI-X capability structure of device function with a maximum
 * possible number of interrupts in the range between @minvec and @maxvec
 * upon its software driver call to request for MSI-X mode enabled on its
 * hardware device function. It returns a negative errno if an error occurs.
 * If it succeeds, it returns the actual number of interrupts allocated and
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X interrupts.
 **/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
			  int minvec, int maxvec)
{
	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);

/**
 * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
 * @dev:		PCI device to operate on
 * @min_vecs:		minimum number of vectors required (must be >= 1)
 * @max_vecs:		maximum (desired) number of vectors
 * @flags:		flags or quirks for the allocation
 * @affd:		optional description of the affinity requirements
 *
 * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
 * vectors if available, and fall back to a single legacy vector
 * if neither is available.  Return the number of vectors allocated
 * (which might be smaller than @max_vecs) if successful, or a negative
 * error code on error. If less than @min_vecs interrupt vectors are
 * available for @dev the function will fail with -ENOSPC.
 *
 * To get the Linux IRQ number used for a vector that can be passed to
 * request_irq() use the pci_irq_vector() helper.
 */
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
				   unsigned int max_vecs, unsigned int flags,
				   const struct irq_affinity *affd)
{
	static const struct irq_affinity msi_default_affd;
	int vecs = -ENOSPC;

	if (flags & PCI_IRQ_AFFINITY) {
		if (!affd)
			affd = &msi_default_affd;
	} else {
		if (WARN_ON(affd))
			affd = NULL;
	}

	if (flags & PCI_IRQ_MSIX) {
		vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
					       affd);
		if (vecs > 0)
			return vecs;
	}

	if (flags & PCI_IRQ_MSI) {
		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
		if (vecs > 0)
			return vecs;
	}

	/* use legacy irq if allowed */
	if (flags & PCI_IRQ_LEGACY) {
		if (min_vecs == 1 && dev->irq) {
			pci_intx(dev, 1);
			return 1;
		}
	}

	return vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);

/**
 * pci_free_irq_vectors - free previously allocated IRQs for a device
 * @dev:		PCI device to operate on
 *
 * Undoes the allocations and enabling in pci_alloc_irq_vectors().
 */
void pci_free_irq_vectors(struct pci_dev *dev)
{
	pci_disable_msix(dev);
	pci_disable_msi(dev);
}
EXPORT_SYMBOL(pci_free_irq_vectors);

/**
 * pci_irq_vector - return Linux IRQ number of a device vector
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based)
 */
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return entry->irq;
			i++;
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(nr >= entry->nvec_used))
			return -EINVAL;
	} else {
		if (WARN_ON_ONCE(nr > 0))
			return -EINVAL;
	}

	return dev->irq + nr;
}
EXPORT_SYMBOL(pci_irq_vector);

/**
 * pci_irq_get_affinity - return the affinity of a particular msi vector
 * @dev:	PCI device to operate on
 * @nr:		device-relative interrupt vector index (0-based)
 */
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return entry->affinity;
			i++;
		}
		WARN_ON_ONCE(1);
		return NULL;
	} else if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(!entry || !entry->affinity ||
				 nr >= entry->nvec_used))
			return NULL;

		return &entry->affinity[nr];
	} else {
		return cpu_possible_mask;
	}
}
EXPORT_SYMBOL(pci_irq_get_affinity);

/**
 * pci_irq_get_node - return the NUMA node of a particular msi vector
 * @pdev:	PCI device to operate on
 * @vec:	device-relative interrupt vector index (0-based)
 */
int pci_irq_get_node(struct pci_dev *pdev, int vec)
{
	const struct cpumask *mask;

	mask = pci_irq_get_affinity(pdev, vec);
	if (mask)
		return local_memory_node(cpu_to_node(cpumask_first(mask)));
	return dev_to_node(&pdev->dev);
}
EXPORT_SYMBOL(pci_irq_get_node);

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
	return to_pci_dev(desc->dev);
}
EXPORT_SYMBOL(msi_desc_to_pci_dev);

void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(desc);

	return dev->bus->sysdata;
}
EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
 * pci_msi_domain_write_msg - Helper to write MSI message to MSI entry
 * @irq_data:	Pointer to interrupt data of the MSI interrupt
 * @msg:	Pointer to the message
 */
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);

	/*
	 * For MSI-X desc->irq is always equal to irq_data->irq. For
	 * MSI only the first interrupt of MULTI MSI passes the test.
	 */
	if (desc->irq == irq_data->irq)
		__pci_write_msi_msg(desc, msg);
}

/**
 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
 * @dev:	Pointer to the PCI device
 * @desc:	Pointer to the MSI descriptor
 *
 * The ID number is only used within the irqdomain.
 */
irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
					  struct msi_desc *desc)
{
	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}

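/*
 * True only for multi-message MSI; MSI-X descriptors always carry a
 * single vector each.
 */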
static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
{
	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
}

/**
 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
 *			      for @dev
 * @domain:	The interrupt domain to check
 * @info:	The domain info for verification
 * @dev:	The device to check
 *
 * Returns:
 *  0 if the functionality is supported
 *  1 if Multi MSI is requested, but the domain does not support it
 *  -ENOTSUPP otherwise
 */
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev)
{
	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));

	/* Special handling to support __pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) &&
	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
		return 1;
	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
		return -ENOTSUPP;

	return 0;
}

static int pci_msi_domain_handle_error(struct irq_domain *domain,
				       struct msi_desc *desc, int error)
{
	/* Special handling to support __pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
		return 1;

	return error;
}

#ifdef GENERIC_MSI_DOMAIN_OPS
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
					       desc);
}
#else
#define pci_msi_domain_set_desc		NULL
#endif

static struct msi_domain_ops pci_msi_domain_ops_default = {
	.set_desc	= pci_msi_domain_set_desc,
	.msi_check	= pci_msi_domain_check_cap,
	.handle_error	= pci_msi_domain_handle_error,
};

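/*
 * Fill in any domain/chip callbacks the caller left NULL with the PCI
 * defaults above, so irq domain implementations only need to override
 * what they actually change.
 */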
static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &pci_msi_domain_ops_default;
	} else {
		if (ops->set_desc == NULL)
			ops->set_desc = pci_msi_domain_set_desc;
		if (ops->msi_check == NULL)
			ops->msi_check = pci_msi_domain_check_cap;
		if (ops->handle_error == NULL)
			ops->handle_error = pci_msi_domain_handle_error;
	}
}

static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
	if (!chip->irq_mask)
		chip->irq_mask = pci_msi_mask_irq;
	if (!chip->irq_unmask)
		chip->irq_unmask = pci_msi_unmask_irq;
}

/**
 * pci_msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops and creates a MSI interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		pci_msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		pci_msi_domain_update_chip_ops(info);

	info->flags |= MSI_FLAG_ACTIVATE_EARLY;
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		info->flags |= MSI_FLAG_MUST_REACTIVATE;

	/* PCI-MSI is oneshot-safe */
	info->chip->flags |= IRQCHIP_ONESHOT_SAFE;

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (!domain)
		return NULL;

	irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
	return domain;
}
EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);

/*
 * Users of the generic MSI infrastructure expect a device to have a single
 * ID, so with DMA aliases we have to pick the least-worst compromise.
 * Devices with DMA phantom functions tend to still emit MSIs from the
 * real function number, so we ignore those and only consider topological
 * aliases where either the alias device or RID appears on a different bus
 * number. We also make the reasonable assumption that bridges are walked
 * in an upstream direction (so the last one seen wins), and the much
 * braver assumption that the most likely case is that of PCI->PCIe so we
 * should always use the alias RID.
 */
static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *pa = data;
	u8 bus = PCI_BUS_NUM(*pa);

	if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
		*pa = alias;

	return 0;
}

/**
 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
 * @domain:	The interrupt domain
 * @pdev:	The PCI device.
 *
 * The RID for a device is formed from the alias, with a firmware
 * supplied mapping applied.
 *
 * Returns: The RID.
 */
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
	struct device_node *of_node;
	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

	of_node = irq_domain_get_of_node(domain);
	rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
			iort_msi_map_rid(&pdev->dev, rid);

	return rid;
}

/**
 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
 * @pdev:	The PCI device
 *
 * Use the firmware data to find a device-specific MSI domain
 * (i.e. not one that is set as a default).
 *
 * Returns: The corresponding MSI domain or NULL if none has been found.
 */
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	struct irq_domain *dom;
	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
	dom = of_msi_map_get_device_domain(&pdev->dev, rid);
	if (!dom)
		dom = iort_get_device_domain(&pdev->dev, rid);
	return dom;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */