/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to the OS through the BIOS via the DMA Remapping Reporting
 * (DMAR) ACPI table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping.
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/iommu_table.h>

#define PREFIX "DMAR: "

/*
 * No locks are needed as the DMA remapping hardware unit list is
 * constructed at boot time and hotplug of these units is not
 * supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * Add INCLUDE_ALL units at the tail, so a scan of the list
	 * finds them at the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}
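
/*
 * dmar_parse_one_dev_scope - resolve one ACPI device-scope entry to a
 * pci_dev by walking the bus/dev/fn path elements that follow the scope
 * header.  Returns 0 with *dev set (NULL if the device is absent), or
 * -EINVAL if the scope type contradicts the device that was found.
 */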
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-existent devices in the DMAR
		 * table; just ignore them.
		 */
		if (!bus) {
			printk(KERN_WARNING
				PREFIX "Device scope bus [%d] not found\n",
				scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
				"Device scope device [%04x:%02x:%02x.%02x] not found\n",
				segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	     pdev->subordinate) ||
	    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
	     !pdev->subordinate)) {
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}
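
/*
 * dmar_parse_dev_scope - count the endpoint/bridge entries in a device
 * scope list, allocate a pci_dev pointer array of that size, and resolve
 * each entry through dmar_parse_one_dev_scope().
 */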
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			printk(KERN_WARNING PREFIX
				"Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parse exactly one DMA remapping hardware
 * definition structure, which uniquely represents one DMA remapping
 * hardware unit present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}
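
/*
 * dmar_parse_dev - resolve the device scope of one DRHD once the PCI
 * subsystem is up; an unresolvable unit is unlinked and freed.
 */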
static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}
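
/*
 * dmar_parse_one_rmrr - record one Reserved Memory Region Reporting
 * (RMRR) structure; its device scope is resolved later by
 * rmrr_parse_dev().
 */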
static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *)rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}
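
/*
 * dmar_find_matched_atsr_unit - return 1 if the root port above @dev is
 * listed in an ATS Reporting (ATSR) structure for the device's segment,
 * i.e. the device may use Address Translation Services.
 */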
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	dev = pci_physfn(dev);

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !pci_is_pcie(bridge) ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

#ifdef CONFIG_ACPI_NUMA
static int __init
dmar_parse_one_rhsa(struct acpi_dmar_header *header)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = -1;
			drhd->iommu->node = node;
			return 0;
		}
	}
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
			"DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
			"RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 1 : 0;
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use the DMAR
	 * copy that SINIT saved in SinitMleData in the TXT heap (which is
	 * DMA protected).
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
		dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
			ret = dmar_parse_one_rhsa(entry_header);
#endif
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}
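
/* Return 1 if @dev or any bridge above it appears in @devices. */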
static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
				 struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}
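
/*
 * dmar_find_matched_drhd_unit - find the DRHD whose device scope covers
 * @dev; an INCLUDE_ALL unit matches any device in its segment.
 */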
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}
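
/*
 * dmar_dev_scope_init - resolve device scopes for all DRHD, RMRR and
 * ATSR structures once PCI enumeration has run.
 */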
int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "Failed to parse DMAR table\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}
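
/*
 * check_zero_address - sanity-check each DRHD register base in the DMAR
 * table: reject a zero address, or one whose CAP/ECAP registers read
 * back as all ones.  Returns 1 if the table looks usable, 0 otherwise
 * (disabling DMAR when possible).
 */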
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			void __iomem *addr;
			u64 cap, ecap;

			drhd = (void *)entry_header;
			if (!drhd->address) {
				warn_invalid_dmar(0, "");
				goto failed;
			}

			addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
			if (!addr) {
				printk(KERN_ERR "IOMMU: can't validate: %llx\n",
				       drhd->address);
				goto failed;
			}
			cap = dmar_readq(addr + DMAR_CAP_REG);
			ecap = dmar_readq(addr + DMAR_ECAP_REG);
			early_iounmap(addr, VTD_PAGE_SIZE);
			if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
				warn_invalid_dmar(drhd->address,
						  " returns all ones");
				goto failed;
			}
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;

failed:
#ifdef CONFIG_DMAR
	dmar_disabled = 1;
#endif
	return 0;
}
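
/*
 * detect_intel_iommu - early detection entry point; with a usable DMAR
 * table it claims the IOMMU (iommu_detected, ACS) and hooks
 * intel_iommu_init() into x86_init, then drops the early table mapping.
 */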
int __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;

		dmar = (struct acpi_table_dmar *)dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
			iommu_detected = 1;
			/* Make sure ACS will be enabled */
			pci_request_acs();
		}
#endif
#ifdef CONFIG_X86
		if (ret)
			x86_init.iommu.iommu_init = intel_iommu_init;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;

	return ret ? 1 : -ENODEV;
}
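
/*
 * alloc_iommu - map a DRHD's register window, read its capability
 * registers and compute the supported address widths; on success the
 * new intel_iommu is attached as drhd->iommu.
 */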
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	iommu->node = -1;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->seq_id,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}
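
/*
 * qi_check_fault - check DMAR_FSTS_REG for queued-invalidation faults:
 * on IQE the bad descriptor is overwritten with the wait descriptor and
 * -EINVAL is returned; on ITE the aborted descriptors are marked
 * QI_ABORT and -EAGAIN tells the caller to resubmit; ICE is simply
 * cleared.
 */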
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
			       sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					    sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We leave interrupts disabled here, to prevent the
		 * interrupt context from queueing another command while
		 * one is already submitted and waiting for completion on
		 * this cpu. Otherwise the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}
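
/*
 * qi_flush_dev_iotlb - issue a device-TLB (ATS) invalidate for @sid.
 * For a multi-page range (@mask > 0), @addr must be aligned to the
 * 2^@mask page region; setting the low address bits to 1 encodes the
 * range size in the descriptor.
 */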
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware completes it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most of it is MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX	(ARRAY_SIZE(dma_remap_fault_reasons) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	unsigned long flag;
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
		       "INTR-REMAP:[fault reason %02d] %s\n",
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
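
/*
 * dmar_fault - primary fault interrupt handler: walk the fault
 * recording registers, log each pending fault and clear its F bit,
 * then clear the summary bits in DMAR_FSTS_REG.
 */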
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	irq_set_handler_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		irq_set_handler_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;

		ret = dmar_set_interrupt(iommu);
		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

		/*
		 * Clear any previous faults.
		 */
		dmar_fault(iommu->irq, iommu);
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable
	 * queued invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}
IOMMU_INIT_POST(detect_intel_iommu);