// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)	"DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>

#include "../irq_remapping.h"
#include "cap_audit.h"

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;
	unsigned int devfn;
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu	irq_2_iommu;
	struct irte		irte_entry;
	union {
		struct msi_msg	msi_entry;
	};
};

#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
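
/*
 * irq_2_ir_lock protects the IRTE allocation bitmaps and the irq_2_iommu
 * mappings. It nests inside dmar_global_lock and is taken with interrupts
 * disabled, since IRTEs are also modified from interrupt context.
 */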
DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}
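
/*
 * Allocate a block of 2^mask contiguous, naturally aligned IRTEs for
 * @count interrupts and record the result in @irq_iommu. Returns the
 * first allocated index, or -1 on failure.
 */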
static int alloc_irte(struct intel_iommu *iommu,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	return qi_submit_sync(iommu, &desc, 1, 0);
}

static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
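		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other
		 * processors behind us, so the return value should always
		 * match the old value.
		 */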
		WARN_ON(!ret);
	} else
#endif
	{
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
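
	/* Track whether this entry is now in posted or remapped mode */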
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_iommu(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	}
	return NULL;
}

static struct intel_iommu *map_ioapic_to_iommu(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	}
	return NULL;
}

static struct irq_domain *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd = dmar_find_matched_drhd_unit(dev);

	return drhd ? drhd->iommu->ir_msi_domain : NULL;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
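
/*
 * Source validation type (SVT): how the hardware validates the requester-id
 * of an interrupt request against the SID and SQ fields of the IRTE. The
 * source-id qualifier (SQ) selects which request-id bits are compared:
 * SQ_ALL_16 compares all 16 bits, while the SQ_13_IGNORE_* variants ignore
 * one to three of the least significant bits.
 */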
#define SVT_NO_VERIFY		0x0
#define SVT_VERIFY_SID_SQ	0x1
#define SVT_VERIFY_BUS		0x2

#define SQ_ALL_16		0x0
#define SQ_13_IGNORE_1		0x1
#define SQ_13_IGNORE_2		0x2
#define SQ_13_IGNORE_3		0x3
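
/*
 * Fill the SVT, SQ and SID fields of an IRTE so that the hardware can
 * verify the source-id of interrupt requests using this entry.
 */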
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
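
/*
 * Set an IRTE to match only the bus number. Interrupt requests that
 * reference this IRTE must have a requester-id whose bus number is
 * between or equal to the start_bus and end_bus arguments.
 */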
static void set_irte_verify_bus(struct irte *irte, unsigned int start_bus,
				unsigned int end_bus)
{
	set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
		     (start_bus << 8) | end_bus);
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}
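
	/*
	 * Should really use SQ_ALL_16. Some platforms are broken; while we
	 * figure out the right quirks for them, use SQ_13_IGNORE_3 for now.
	 */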
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
	int count;
	int busmatch_count;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	if (data->count == 0 || PCI_BUS_NUM(alias) == PCI_BUS_NUM(data->alias))
		data->busmatch_count++;

	data->pdev = pdev;
	data->alias = alias;
	data->count++;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	data.count = 0;
	data.busmatch_count = 0;
	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);
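
	/*
	 * The DMA alias walk provides us with a PCI device and alias. The
	 * only case where it will return an alias on a different bus than
	 * the device is a PCIe-to-PCI bridge, where the alias is for the
	 * subordinate bus. In this case we can only verify the bus.
	 *
	 * If there are multiple aliases, all with the same bus number, then
	 * all we can do is verify the bus. This is typical in NTB hardware
	 * where the alias is on the same bus as the device.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology-based alias, so use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID, so use the original
	 * device.
	 */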
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_verify_bus(irte, PCI_BUS_NUM(data.alias),
				    dev->bus->number);
	else if (data.count >= 2 && data.busmatch_count == data.count)
		set_irte_verify_bus(irte, dev->bus->number, dev->bus->number);
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     pci_dev_id(dev));

	return 0;
}

static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;
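
	/* Check whether the old IR table has the same size as ours */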
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size = INTR_REMAP_TABLE_ENTRIES * sizeof(struct irte);

	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);
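
	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap.
	 */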
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
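
	/* Set interrupt-remapping table pointer */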
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
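
	/*
	 * Global invalidation of the interrupt entry cache to make sure
	 * the hardware uses the new irq remapping table.
	 */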
	qi_global_iec(iommu);
}

static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
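
	/* Enable interrupt-remapping */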
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);
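
	/* Block compatibility-format MSIs */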
	if (sts & DMA_GSTS_CFIS) {
		iommu->gcmd &= ~DMA_GCMD_CFI;
		writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
			      readl, !(sts & DMA_GSTS_CFIS), sts);
	}
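
	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */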
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct fwnode_handle *fn;
	unsigned long *bitmap;
	struct page *pages;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
	if (!fn)
		goto out_free_bitmap;

	iommu->ir_domain =
		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
					    0, INTR_REMAP_TABLE_ENTRIES,
					    fn, &intel_ir_domain_ops,
					    iommu);
	if (!iommu->ir_domain) {
		irq_domain_free_fwnode(fn);
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain =
		arch_create_remap_msi_irq_domain(iommu->ir_domain,
						 "INTEL-IR-MSI",
						 iommu->seq_id);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;
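
	/*
	 * If queued invalidation is already initialized,
	 * it shouldn't be disabled here.
	 */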
	if (!iommu->qi) {
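		/*
		 * Clear previous faults and disable any queued invalidation
		 * left enabled by the firmware or a previous kernel.
		 */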
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_bitmap;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (!is_kdump_kernel()) {
			pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
				iommu->name);
			clear_ir_pre_enabled(iommu);
			iommu_disable_irq_remapping(iommu);
		} else if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_bitmap:
	bitmap_free(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	struct fwnode_handle *fn;

	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			fn = iommu->ir_msi_domain->fwnode;

			irq_domain_remove(iommu->ir_msi_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			fn = iommu->ir_domain->fwnode;

			irq_domain_remove(iommu->ir_domain);
			irq_domain_free_fwnode(fn);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		bitmap_free(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;
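
	/*
	 * Global invalidation of the interrupt entry cache before disabling
	 * interrupt-remapping.
	 */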
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (intel_cap_audit(CAP_AUDIT_STATIC_IRQR, NULL))
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}
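
	/* First make sure all IOMMUs support IRQ remapping */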
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;
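
	/* Detect the remapping mode to use: xapic or x2apic */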
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
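
	/* Do the IR table and irqdomain allocations early */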
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}
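
/*
 * Set Posted-Interrupts capability.
 */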
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
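		/*
		 * A posted-format IRTE's 'pda' field crosses the 64-bit
		 * boundary, so cmpxchg16b is required to update it
		 * atomically. Only expose the posted-interrupt capability
		 * when the CPU supports cmpxchg16b (X86_FEATURE_CX16).
		 */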
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
					~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;
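
	/*
	 * Setup Interrupt-remapping for all the DRHDs now.
	 */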
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
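		/*
		 * Access PCI config space directly; the PCI subsystem
		 * isn't initialized yet at this point.
		 */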
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id = scope->enumeration_id;
	ir_hpet[free].bus = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
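		/*
		 * Access PCI config space directly; the PCI subsystem
		 * isn't initialized yet at this point.
		 */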
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}
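
/*
 * Parse the IOAPIC/HPET scopes of each remapping-capable DRHD and verify
 * that every enumerated IOAPIC is covered by an interrupt-remapping unit.
 */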
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);
		if (!map_ioapic_to_iommu(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;
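
	/*
	 * Disable Interrupt-remapping for all the DRHDs now.
	 */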
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
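
	/*
	 * Clear Posted-Interrupts capability.
	 */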
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);
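
	/*
	 * Setup Interrupt-remapping for all the DRHDs now.
	 */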
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	return -1;
}
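
/*
 * Store the MSI remapping domain pointer in the device, so MSI allocation
 * for it goes through the interrupt-remapping domain, if enabled.
 */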
void intel_irq_remap_add_device(struct dmar_pci_notify_info *info)
{
	if (!irq_remapping_enabled || pci_dev_has_special_msi_domain(info->dev))
		return;

	dev_set_msi_domain(&info->dev->dev, map_dev_to_ir(info->dev));
}

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->dest_mode_logical;
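	/*
	 * Trigger mode in the IRTE will always be edge; for IO-APIC irqs
	 * the actual level or edge trigger is set up in the IO-APIC RTE,
	 * which simplifies level-triggered irq migration.
	 */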
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
};

static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
{
	struct intel_ir_data *ir_data = irqd->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(irqd);
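
	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * and flush the hardware interrupt entry cache.
	 */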
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);
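
	/* Update the hardware only if the interrupt is in remapped mode. */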
	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);
}
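
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin
 * number). The real vector used to interrupt the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same
 * mechanism is used to migrate MSI irqs in the presence of
 * interrupt-remapping.
 */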
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	intel_ir_reconfigure_irte(data, false);
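
	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */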
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;
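
	/* stop posting interrupts, back to remapping mode */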
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;
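
		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */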
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
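
		/* Update the posted mode fields */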
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.name			= "INTEL-IR",
	.irq_ack		= apic_ack_irq,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

static void fill_msi_msg(struct msi_msg *msg, u32 index, u32 subhandle)
{
	memset(msg, 0, sizeof(*msg));

	msg->arch_addr_lo.dmar_base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.dmar_subhandle_valid = true;
	msg->arch_addr_lo.dmar_format = true;
	msg->arch_addr_lo.dmar_index_0_14 = index & 0x7FFF;
	msg->arch_addr_lo.dmar_index_15 = !!(index & 0x8000);

	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;

	msg->arch_data.dmar_subhandle = subhandle;
}

static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct irte *irte = &data->irte_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
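		/* Set source-id of interrupt request */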
		set_ioapic_sid(irte, info->devid);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			    info->devid, irte->present, irte->fpd,
			    irte->dst_mode, irte->redir_hint,
			    irte->trigger_mode, irte->dlvry_mode,
			    irte->avail, irte->vector, irte->dest_id,
			    irte->sid, irte->sq, irte->svt);
		sub_handle = info->ioapic.pin;
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		set_hpet_sid(irte, info->devid);
		break;
	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
		set_msi_sid(irte,
			    pci_real_dma_dev(msi_desc_to_pci_dev(info->desc)));
		break;
	default:
		BUG_ON(1);
		break;
	}
	fill_msi_msg(&data->msi_entry, index, sub_handle);
}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
		return -EINVAL;
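
	/*
	 * With IRQ remapping enabled, don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */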
	if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			if (!i)
				kfree(data);
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;

			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static int intel_irq_remapping_activate(struct irq_domain *domain,
					struct irq_data *irq_data, bool reserve)
{
	intel_ir_reconfigure_irte(irq_data, true);
	return 0;
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static int intel_irq_remapping_select(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      enum irq_domain_bus_token bus_token)
{
	struct intel_iommu *iommu = NULL;

	if (x86_fwspec_is_ioapic(fwspec))
		iommu = map_ioapic_to_iommu(fwspec->param[0]);
	else if (x86_fwspec_is_hpet(fwspec))
		iommu = map_hpet_to_iommu(fwspec->param[0]);

	return iommu && d == iommu->ir_domain;
}

static const struct irq_domain_ops intel_ir_domain_ops = {
	.select = intel_irq_remapping_select,
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};
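
/*
 * Support of Interrupt Remapping Unit Hotplug
 */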
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	ret = intel_cap_audit(CAP_AUDIT_HOTPLUG_IRQR, iommu);
	if (ret)
		return ret;

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}
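
	/* Set up interrupt remapping for the newly added unit now. */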
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}