#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include <acpi/acpi.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
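
/*
 * Illustrative note (not part of the original source): in xapic mode the
 * 8-bit APIC ID must sit in bits 15:8 of the IRTE destination field,
 * while in x2apic mode the full 32-bit ID is used as-is. For example,
 * with x2apic_mode == 0 and a destination APIC ID of 0x03:
 *
 *	irte->dest_id = IRTE_DEST(0x03);	// yields 0x0300
 */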

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!entry || !irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return 0;
}

static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	u16 index, start_index;
	unsigned int mask = 0;
	unsigned long flags;
	int i;

	if (!count || !irq_iommu)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}
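
/*
 * Sketch of how a caller would use alloc_irte() for multi-vector MSI
 * (hypothetical values, illustration only): a request for 3 IRTEs is
 * rounded up to 4 contiguous entries, so irte_mask becomes
 * ilog2(4) == 2 and the hardware can address sub-handles 0..3:
 *
 *	index = alloc_irte(iommu, irq, 3);	// reserves 4 entries
 *	if (index < 0)
 *		return -ENOSPC;			// table exhausted
 */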

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
	return index;
}

static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
			u16 subhandle)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct irq_cfg *cfg = irq_get_chip_data(irq);
	unsigned long flags;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	cfg->remapped = 1;
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return 0;
}
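
/*
 * Sketch of the two-step multi-vector MSI flow built from the helpers
 * above (hedged; it mirrors intel_msi_alloc_irq()/intel_msi_setup_irq()
 * further down, and assumes hypothetical consecutive irq numbers): one
 * block of IRTEs is allocated for the first irq, then each additional
 * irq is pointed at the same base index with its own sub-handle:
 *
 *	index = alloc_irte(iommu, first_irq, nvec);
 *	for (i = 1; i < nvec; i++)
 *		set_irte_irq(first_irq + i, iommu, index, i);
 */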

static int modify_irte(int irq, struct irte *irte_modified)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit(&irte->low, irte_modified->low);
	set_64bit(&irte->high, irte_modified->high);
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

static int free_irte(int irq)
{
	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
	unsigned long flags;
	int rc;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	rc = clear_entries(irq_iommu);

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
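
/*
 * Example of the verification semantics (derived from the SVT/SQ
 * definitions above; bus/dev are hypothetical): with SVT_VERIFY_SID_SQ
 * and SQ_13_IGNORE_3, the IOMMU compares only the upper 13 bits of the
 * requester-id, so interrupts from functions 0-7 of the device all pass:
 *
 *	set_irte_sid(&irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3,
 *		     (bus << 8) | PCI_DEVFN(dev, 0));
 */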

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}

	if (sid == 0) {
		pr_warning("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, we
	 * use SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (pci_is_pcie(dev) || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (bridge) {
		if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
				(bridge->bus->number << 8) | dev->bus->number);
		else /* this is a legacy PCI bridge */
			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
				(bridge->bus->number << 8) | bridge->devfn);
	}

	return 0;
}
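
/*
 * Worked example (hypothetical topology, illustration only): a
 * conventional PCI device on bus 5 behind a PCIe-to-PCI bridge that
 * itself sits on bus 4 cannot present a reliable requester-id, so the
 * bridge branch above verifies by bus number instead of exact
 * source-id, packing the bridge's bus and the device's bus:
 *
 *	set_irte_sid(&irte, SVT_VERIFY_BUS, SQ_ALL_16, (4 << 8) | 5);
 */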

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	iommu->gcmd |= DMA_GCMD_SIRTP;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status.  Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_ATOMIC);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;	/* don't leave a dangling pointer */
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_irq_remapping(iommu, mode);
	return 0;
}
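
/*
 * Sizing note (derived from the constants used above, assuming the
 * usual 16-byte IRTE and 4 KiB pages): INTR_REMAP_PAGE_ORDER pages back
 * INTR_REMAP_TABLE_ENTRIES entries, e.g. an order-8 allocation
 * (256 pages, 1 MiB) holds 65536 16-byte IRTEs, matching the size field
 * (INTR_REMAP_TABLE_REG_SIZE) programmed into DMAR_IRTA_REG by
 * iommu_set_irq_remapping().
 */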

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);	/* GSTS is 32 bits wide */
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar = (struct acpi_table_dmar *)dmar_tbl;

	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static int __init intel_irq_remapping_supported(void)
{
	struct dmar_drhd_unit *drhd;

	if (disable_irq_remap)
		return 0;
	if (irq_remap_broken) {
		printk(KERN_WARNING
			"This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable.  To maintain system stability\n"
			"interrupt remapping is being disabled.  Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		disable_irq_remap = 1;
		return 0;
	}

	if (!dmar_ir_support())
		return 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			return 0;
	}

	return 1;
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	bool x2apic_present;
	int setup = 0;
	int eim = 0;

	x2apic_present = x2apic_supported();

	if (parse_ioapics_under_ir() != 1) {
		printk(KERN_INFO "Not enabling interrupt remapping\n");
		goto error;
	}

	if (x2apic_present) {
		eim = !dmar_x2apic_optout();
		if (!eim)
			printk(KERN_WARNING
				"Your BIOS is broken and requested that x2apic be disabled.\n"
				"This will slightly decrease performance.\n"
				"Use 'intremap=no_x2apic_optout' to override the BIOS request.\n");
	}

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		/*
		 * If the queued invalidation is already initialized,
		 * shouldn't disable it.
		 */
		if (iommu->qi)
			continue;

		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);

		/*
		 * Disable intr remapping and queued invalidation, if already
		 * enabled prior to OS handover.
		 */
		iommu_disable_irq_remapping(iommu);

		dmar_disable_qi(iommu);
	}

	/*
	 * check for the Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			goto error;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_enable_qi(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			goto error;
		}
	}

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (intel_setup_irq_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	/*
	 * VT-d has a different layout for IO-APIC entries when
	 * interrupt remapping is enabled. So it needs a special routine
	 * to print IO-APIC entries for debugging purposes too.
	 */
	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;

	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");

	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	/*
	 * handle error condition gracefully here!
	 */
	if (x2apic_present)
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");

	return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				    struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly: the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}
	ir_hpet[ir_hpet_num].bus = bus;
	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_hpet[ir_hpet_num].iommu = iommu;
	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
	ir_hpet_num++;
}

static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI config space directly: the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base "
			       "0x%Lx IOMMU %d\n", scope->enumeration_id,
			       drhd->address, iommu->seq_id);

			ir_parse_one_ioapic_scope(scope, iommu);
		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
			if (ir_hpet_num == MAX_HPET_TBS) {
				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
				return -1;
			}

			printk(KERN_INFO "HPET id %d under DRHD base "
			       "0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_parse_one_hpet_scope(scope, iommu);
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between IOAPICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;
	int ioapic_idx;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (!ir_supported)
		return 0;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 1;
}

int __init ir_dev_scope_init(void)
{
	if (!irq_remapping_enabled)
		return 0;

	return dmar_dev_scope_init();
}
rootfs_initcall(ir_dev_scope_init);

static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
		setup = 1;
	}

	if (!setup)
		goto error;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This helps simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}
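
/*
 * Example of the IRTE produced above (hypothetical values, illustration
 * only): for vector 0x41 targeting APIC ID 2 in xapic mode, the entry
 * comes out present, edge-triggered, redirection hint set, with the
 * destination shifted per IRTE_DEST():
 *
 *	struct irte irte;
 *	prepare_irte(&irte, 0x41, 2);	// dest_id == 0x0200 when !x2apic_mode
 */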

static int intel_setup_ioapic_entry(int irq,
				    struct IO_APIC_route_entry *route_entry,
				    unsigned int destination, int vector,
				    struct io_apic_irq_attr *attr)
{
	int ioapic_id = mpc_ioapic_id(attr->ioapic);
	struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id);
	struct IR_IO_APIC_route_entry *entry;
	struct irte irte;
	int index;

	if (!iommu) {
		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
		return -ENODEV;
	}

	entry = (struct IR_IO_APIC_route_entry *)route_entry;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id);
		return -ENOMEM;
	}

	prepare_irte(&irte, vector, destination);

	/* Set source-id of interrupt request */
	set_ioapic_sid(&irte, ioapic_id);

	modify_irte(irq, &irte);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
		"Avail:%X Vector:%02X Dest:%08X "
		"SID:%04X SQ:%X SVT:%X)\n",
		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
		irte.avail, irte.vector, irte.dest_id,
		irte.sid, irte.sq, irte.svt);

	memset(entry, 0, sizeof(*entry));

	entry->index2	= (index >> 15) & 0x1;
	entry->zero	= 0;
	entry->format	= 1;
	entry->index	= (index & 0x7fff);

	/*
	 * IO-APIC RTE will be configured with the virtual vector.
	 * The irq handler will do the explicit EOI to the io-apic.
	 */
	entry->vector	= attr->ioapic_pin;
	entry->mask	= 0;			/* enable IRQ */
	entry->trigger	= attr->trigger;
	entry->polarity	= attr->polarity;

	/*
	 * Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (attr->trigger)
		entry->mask = 1;

	return 0;
}
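
/*
 * Layout note (illustrating the remapped RTE filled in above): with
 * format == 1 the IO-APIC RTE no longer carries a real vector and
 * destination; it carries the 16-bit IRTE handle split across 'index'
 * (bits 14:0) and 'index2' (bit 15). E.g. for index 0x8003:
 *
 *	entry->index  = 0x0003;
 *	entry->index2 = 0x1;
 */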

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used for interrupting the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
			  bool force)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int dest, irq = data->irq;
	struct irte irte;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EINVAL;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	if (get_irte(irq, &irte))
		return -EBUSY;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically updates the IRTE with the new destination, vector
	 * and flushes the interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	cpumask_copy(data->affinity, mask);
	return 0;
}

static void intel_compose_msi_msg(struct pci_dev *pdev,
				  unsigned int irq, unsigned int dest,
				  struct msi_msg *msg, u8 hpet_id)
{
	struct irq_cfg *cfg;
	struct irte irte;
	u16 sub_handle = 0;
	int ir_index;

	cfg = irq_get_chip_data(irq);

	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
	BUG_ON(ir_index == -1);

	prepare_irte(&irte, cfg->vector, dest);

	/* Set source-id of interrupt request */
	if (pdev)
		set_msi_sid(&irte, pdev);
	else
		set_hpet_sid(&irte, hpet_id);

	modify_irte(irq, &irte);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->data = sub_handle;
	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
			  MSI_ADDR_IR_SHV |
			  MSI_ADDR_IR_INDEX1(ir_index) |
			  MSI_ADDR_IR_INDEX2(ir_index);
}
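
/*
 * Example of the remappable MSI message built above (hypothetical
 * handle, illustration only): MSI_ADDR_IR_SHV marks the sub-handle in
 * the data payload as valid, so the IRTE actually used is
 * ir_index + msg->data. For ir_index 0x10 and sub_handle 2, msg->data
 * is 2, the handle 0x10 is encoded in address_lo via
 * MSI_ADDR_IR_INDEX1/MSI_ADDR_IR_INDEX2, and the hardware looks up
 * IRTE 0x12.
 */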

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTEs for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}

static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
			       int index, int sub_handle)
{
	struct intel_iommu *iommu;

	iommu = map_dev_to_ir(pdev);
	if (!iommu)
		return -ENOENT;

	/*
	 * setup the mapping between the irq and the IRTE
	 * base index, with the sub_handle pointing to the given
	 * IRTE.
	 */
	set_irte_irq(irq, iommu, index, sub_handle);

	return 0;
}

static int intel_setup_hpet_msi(unsigned int irq, unsigned int id)
{
	struct intel_iommu *iommu = map_hpet_to_ir(id);
	int index;

	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);
	if (index < 0)
		return -1;

	return 0;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.supported		= intel_irq_remapping_supported,
	.prepare		= dmar_table_init,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.setup_ioapic_entry	= intel_setup_ioapic_entry,
	.set_affinity		= intel_ioapic_set_affinity,
	.free_irq		= free_irte,
	.compose_msi_msg	= intel_compose_msi_msg,
	.msi_alloc_irq		= intel_msi_alloc_irq,
	.msi_setup_irq		= intel_msi_setup_irq,
	.setup_hpet_msi		= intel_setup_hpet_msi,
};