/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * table.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>

#define PREFIX "DMAR: "

/* No locks are needed as DMA remapping hardware unit
 * list is constructed at boot time and hotplug of
 * these units are not supported by the architecture.
 */
LIST_HEAD(dmar_drhd_units);

static struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;
static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
	/*
	 * add INCLUDE_ALL at the tail, so scan the list will find it at
	 * the very end.
	 */
	if (drhd->include_all)
		list_add_tail(&drhd->list, &dmar_drhd_units);
	else
		list_add(&drhd->list, &dmar_drhd_units);
}

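/*
 * Resolve one ACPI device-scope entry to a pci_dev by walking the
 * (device, function) path from the scope's start bus down through
 * any intermediate bridges.
 */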
static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
					   struct pci_dev **dev, u16 segment)
{
	struct pci_bus *bus;
	struct pci_dev *pdev = NULL;
	struct acpi_dmar_pci_path *path;
	int count;

	bus = pci_find_bus(segment, scope->bus);
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (count) {
		if (pdev)
			pci_dev_put(pdev);
		/*
		 * Some BIOSes list non-exist devices in DMAR table, just
		 * ignore it
		 */
		if (!bus) {
			printk(KERN_WARNING
			PREFIX "Device scope bus [%d] not found\n",
			scope->bus);
			break;
		}
		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
		if (!pdev) {
			printk(KERN_WARNING PREFIX
			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
			segment, bus->number, path->dev, path->fn);
			break;
		}
		path++;
		count--;
		bus = pdev->subordinate;
	}
	if (!pdev) {
		printk(KERN_WARNING PREFIX
		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
		segment, scope->bus, path->dev, path->fn);
		*dev = NULL;
		return 0;
	}
	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
	    pdev->subordinate) || (scope->entry_type ==
	    ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
		/* print the name before dropping our reference */
		printk(KERN_WARNING PREFIX
			"Device scope type does not match for %s\n",
			pci_name(pdev));
		pci_dev_put(pdev);
		return -EINVAL;
	}
	*dev = pdev;
	return 0;
}

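/*
 * Parse all device-scope entries in [start, end): count the endpoint
 * and bridge entries, allocate the device array, then resolve each
 * entry to a pci_dev.
 */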
static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
				       struct pci_dev ***devices, u16 segment)
{
	struct acpi_dmar_device_scope *scope;
	void *tmp = start;
	int index;
	int ret;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else
			printk(KERN_WARNING PREFIX
			       "Unsupported device scope\n");
		start += scope->length;
	}
	if (*cnt == 0)
		return 0;

	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
	if (!*devices)
		return -ENOMEM;

	start = tmp;
	index = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
			ret = dmar_parse_one_dev_scope(scope,
				&(*devices)[index], segment);
			if (ret) {
				kfree(*devices);
				return ret;
			}
			index++;
		}
		start += scope->length;
	}

	return 0;
}

/**
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represent one DMA remapping hardware unit
 * present in the platform
 */
static int __init
dmar_parse_one_drhd(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;

	dmaru->hdr = header;
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */

	ret = alloc_iommu(dmaru);
	if (ret) {
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);
	return 0;
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
	struct acpi_dmar_hardware_unit *drhd;
	int ret = 0;

	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

	if (dmaru->include_all)
		return 0;

	ret = dmar_parse_dev_scope((void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				&dmaru->devices_cnt, &dmaru->devices,
				drhd->segment);
	if (ret) {
		list_del(&dmaru->list);
		kfree(dmaru);
	}
	return ret;
}

#ifdef CONFIG_DMAR
LIST_HEAD(dmar_rmrr_units);

static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr)
{
	list_add(&rmrr->list, &dmar_rmrr_units);
}

static int __init
dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	struct acpi_dmar_reserved_memory *rmrr;
	struct dmar_rmrr_unit *rmrru;

	rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
	if (!rmrru)
		return -ENOMEM;

	rmrru->hdr = header;
	rmrr = (struct acpi_dmar_reserved_memory *)header;
	rmrru->base_address = rmrr->base_address;
	rmrru->end_address = rmrr->end_address;

	dmar_register_rmrr_unit(rmrru);
	return 0;
}

static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
	struct acpi_dmar_reserved_memory *rmrr;
	int ret;

	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
		((void *)rmrr) + rmrr->header.length,
		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

	if (ret || (rmrru->devices_cnt == 0)) {
		list_del(&rmrru->list);
		kfree(rmrru);
	}
	return ret;
}

static LIST_HEAD(dmar_atsr_units);

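/*
 * Parse one ATS reporting (ATSR) structure and queue it for later
 * device-scope resolution.
 */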
static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
{
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	atsr = container_of(hdr, struct acpi_dmar_atsr, header);
	atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
	if (!atsru)
		return -ENOMEM;

	atsru->hdr = hdr;
	atsru->include_all = atsr->flags & 0x1;

	list_add(&atsru->list, &dmar_atsr_units);

	return 0;
}

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
	int rc;
	struct acpi_dmar_atsr *atsr;

	if (atsru->include_all)
		return 0;

	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
	rc = dmar_parse_dev_scope((void *)(atsr + 1),
				(void *)atsr + atsr->header.length,
				&atsru->devices_cnt, &atsru->devices,
				atsr->segment);
	if (rc || !atsru->devices_cnt) {
		list_del(&atsru->list);
		kfree(atsru);
	}

	return rc;
}

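/*
 * Return 1 if @dev is below a PCIe root port listed in the ATSR unit for
 * its segment (or if that unit has INCLUDE_ALL set), 0 otherwise.
 */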
int dmar_find_matched_atsr_unit(struct pci_dev *dev)
{
	int i;
	struct pci_bus *bus;
	struct acpi_dmar_atsr *atsr;
	struct dmar_atsr_unit *atsru;

	list_for_each_entry(atsru, &dmar_atsr_units, list) {
		atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
		if (atsr->segment == pci_domain_nr(dev->bus))
			goto found;
	}

	return 0;

found:
	for (bus = dev->bus; bus; bus = bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (!bridge || !bridge->is_pcie ||
		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
			return 0;

		if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
			for (i = 0; i < atsru->devices_cnt; i++)
				if (atsru->devices[i] == bridge)
					return 1;
			break;
		}
	}

	if (atsru->include_all)
		return 1;

	return 0;
}
#endif

static void __init
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		printk(KERN_INFO PREFIX
		       "DRHD base: %#016Lx flags: %#x\n",
		       (unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		printk(KERN_INFO PREFIX
		       "RMRR base: %#016Lx end: %#016Lx\n",
		       (unsigned long long)rmrr->base_address,
		       (unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ATSR:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
		       (unsigned long long)rhsa->base_address,
		       rhsa->proximity_domain);
		break;
	}
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	/* if we could find DMAR table, then there are DMAR devices */
	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar_tbl,
				&dmar_tbl_size);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		printk(KERN_WARNING PREFIX "Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return (ACPI_SUCCESS(status) ? 1 : 0);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	int ret = 0;

	/*
	 * Do it again, earlier dmar_tbl mapping could be mapped with
	 * fixed map.
	 */
	dmar_table_detect();

	/*
	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
	 */
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
		return -EINVAL;
	}

	printk(KERN_INFO PREFIX "Host address width %d\n",
	       dmar->width + 1);

	entry_header = (struct acpi_dmar_header *)(dmar + 1);
	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			ret = -EINVAL;
			break;
		}

		dmar_table_print_dmar_entry(entry_header);

		switch (entry_header->type) {
		case ACPI_DMAR_TYPE_HARDWARE_UNIT:
			ret = dmar_parse_one_drhd(entry_header);
			break;
		case ACPI_DMAR_TYPE_RESERVED_MEMORY:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_rmrr(entry_header);
#endif
			break;
		case ACPI_DMAR_TYPE_ATSR:
#ifdef CONFIG_DMAR
			ret = dmar_parse_one_atsr(entry_header);
#endif
			break;
		case ACPI_DMAR_HARDWARE_AFFINITY:
			/* We don't do anything with RHSA (yet?) */
			break;
		default:
			printk(KERN_WARNING PREFIX
				"Unknown DMAR structure type %d\n",
				entry_header->type);
			ret = 0; /* for forward compatibility */
			break;
		}
		if (ret)
			break;

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return ret;
}

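/*
 * Return 1 if @dev or any bridge above it is in @devices[], 0 otherwise.
 */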
int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
			  struct pci_dev *dev)
{
	int index;

	while (dev) {
		for (index = 0; index < cnt; index++)
			if (dev == devices[index])
				return 1;

		/* Check our parent */
		dev = dev->bus->self;
	}

	return 0;
}

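/*
 * Find the DRHD unit whose device scope (or INCLUDE_ALL flag) covers @dev.
 */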
struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru = NULL;
	struct acpi_dmar_hardware_unit *drhd;

	list_for_each_entry(dmaru, &dmar_drhd_units, list) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			return dmaru;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			return dmaru;
	}

	return NULL;
}

int __init dmar_dev_scope_init(void)
{
	struct dmar_drhd_unit *drhd, *drhd_n;
	int ret = -ENODEV;

	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
		ret = dmar_parse_dev(drhd);
		if (ret)
			return ret;
	}

#ifdef CONFIG_DMAR
	{
		struct dmar_rmrr_unit *rmrr, *rmrr_n;
		struct dmar_atsr_unit *atsr, *atsr_n;

		list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
			ret = rmrr_parse_dev(rmrr);
			if (ret)
				return ret;
		}

		list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
			ret = atsr_parse_dev(atsr);
			if (ret)
				return ret;
		}
	}
#endif

	return ret;
}

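/*
 * Parse the DMAR table once and sanity-check the result; subsequent
 * calls return immediately.
 */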
int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized)
		return 0;

	dmar_table_initialized = 1;

	ret = parse_dmar_table();
	if (ret) {
		if (ret != -ENODEV)
			printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
		return ret;
	}

	if (list_empty(&dmar_drhd_units)) {
		printk(KERN_INFO PREFIX "No DMAR devices found\n");
		return -ENODEV;
	}

#ifdef CONFIG_DMAR
	if (list_empty(&dmar_rmrr_units))
		printk(KERN_INFO PREFIX "No RMRR found\n");

	if (list_empty(&dmar_atsr_units))
		printk(KERN_INFO PREFIX "No ATSR found\n");
#endif

	return 0;
}

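/*
 * Reject tables that report a DRHD register base address of zero, a
 * known symptom of broken BIOSes; returns 0 on a bad table, 1 otherwise.
 */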
int __init check_zero_address(void)
{
	struct acpi_table_dmar *dmar;
	struct acpi_dmar_header *entry_header;
	struct acpi_dmar_hardware_unit *drhd;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	entry_header = (struct acpi_dmar_header *)(dmar + 1);

	while (((unsigned long)entry_header) <
			(((unsigned long)dmar) + dmar_tbl->length)) {
		/* Avoid looping forever on bad ACPI tables */
		if (entry_header->length == 0) {
			printk(KERN_WARNING PREFIX
				"Invalid 0-length structure\n");
			return 0;
		}

		if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) {
			drhd = (void *)entry_header;
			if (!drhd->address) {
				/* Promote an attitude of violence to a BIOS engineer today */
				WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n"
				     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
				     dmi_get_system_info(DMI_BIOS_VENDOR),
				     dmi_get_system_info(DMI_BIOS_VERSION),
				     dmi_get_system_info(DMI_PRODUCT_VERSION));
#ifdef CONFIG_DMAR
				dmar_disabled = 1;
#endif
				return 0;
			}
			break;
		}

		entry_header = ((void *)entry_header + entry_header->length);
	}
	return 1;
}

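/*
 * Early boot hook: map and validate the DMAR table and decide whether
 * DMA remapping and/or interrupt remapping hardware is usable.
 */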
void __init detect_intel_iommu(void)
{
	int ret;

	ret = dmar_table_detect();
	if (ret)
		ret = check_zero_address();
	{
#ifdef CONFIG_INTR_REMAP
		struct acpi_table_dmar *dmar;
		/*
		 * for now we will disable dma-remapping when interrupt
		 * remapping is enabled.
		 * When support for queued invalidation for IOTLB invalidation
		 * is added, we will not need this any more.
		 */
		dmar = (struct acpi_table_dmar *) dmar_tbl;
		if (ret && cpu_has_x2apic && dmar->flags & 0x1)
			printk(KERN_INFO
			       "Queued invalidation will be enabled to support "
			       "x2apic and Intr-remapping.\n");
#endif
#ifdef CONFIG_DMAR
		if (ret && !no_iommu && !iommu_detected && !swiotlb &&
		    !dmar_disabled)
			iommu_detected = 1;
#endif
	}
	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
	dmar_tbl = NULL;
}

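/*
 * Map a DRHD unit's register set, read its (extended) capabilities and
 * attach the resulting struct intel_iommu to the DRHD unit.
 */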
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	int map_size;
	u32 ver;
	static int iommu_allocated = 0;
	int agaw = 0;
	int msagaw = 0;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	iommu->seq_id = iommu_allocated++;
	sprintf(iommu->name, "dmar%d", iommu->seq_id);

	iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
	if (!iommu->reg) {
		printk(KERN_ERR "IOMMU: can't map the region\n");
		goto error;
	}
	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		/* Promote an attitude of violence to a BIOS engineer today */
		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     drhd->reg_base_addr,
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		goto err_unmap;
	}

#ifdef CONFIG_DMAR
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		printk(KERN_ERR
		       "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
#endif
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;

	/* the registers might be more than one page */
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
		cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > VTD_PAGE_SIZE) {
		iounmap(iommu->reg);
		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
		if (!iommu->reg) {
			printk(KERN_ERR "IOMMU: can't map the region\n");
			goto error;
		}
	}

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	spin_lock_init(&iommu->register_lock);

	drhd->iommu = iommu;
	return 0;

 err_unmap:
	iounmap(iommu->reg);
 error:
	kfree(iommu);
	return -1;
}

void free_iommu(struct intel_iommu *iommu)
{
	if (!iommu)
		return;

#ifdef CONFIG_DMAR
	free_dmar_iommu(iommu);
#endif

	if (iommu->reg)
		iounmap(iommu->reg);
	kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors which have completed its work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

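/*
 * Check the fault status register for invalidation-queue errors (IQE,
 * ITE, ICE) affecting the descriptor at @index; returns 0 if none are
 * pending, -EINVAL on a bad descriptor and -EAGAIN if a retry is needed.
 */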
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);

	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> DMAR_IQ_SHIFT) == index) {
			printk(KERN_ERR "VT-d detected invalid descriptor: "
				"low=%llx, high=%llx\n",
				(unsigned long long)qi->desc[index].low,
				(unsigned long long)qi->desc[index].high);
			memcpy(&qi->desc[index], &qi->desc[wait_index],
					sizeof(struct qi_desc));
			__iommu_flush_cache(iommu, &qi->desc[index],
					sizeof(struct qi_desc));
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

	/*
	 * If ITE happens, all pending wait_descs are aborted.
	 * SW need to submit the wait_desc again.
	 */
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	struct qi_desc *hw, wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

	hw = qi->desc;

restart:
	rc = 0;

	spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	hw[index] = *desc;

	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);

	hw[wait_index] = wait_desc;

	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

	/*
	 * update the HW tail register indicating the presence of
	 * new descriptors.
	 */
	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
		/*
		 * We will leave the interrupts disabled, to prevent interrupt
		 * context to queue another cmd while a cmd is already submitted
		 * and waiting for completion on this cpu. This is to avoid
		 * a deadlock where the interrupt context can wait indefinitely
		 * for free slots in the queue.
		 */
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		spin_unlock(&qi->q_lock);
		cpu_relax();
		spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}

/*
 * Flush the global interrupt entry cache.
 */
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;
	desc.high = 0;

	/* should never fail */
	qi_submit_sync(&desc, iommu);
}

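/* Queue a context-cache invalidation of the given granularity. */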
void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

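/* Queue an IOTLB invalidation, draining pending reads/writes when supported. */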
void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);

	qi_submit_sync(&desc, iommu);
}

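/* Queue a device-IOTLB (ATS) invalidation for @sid; @mask sizes the range. */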
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
			u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.high = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE;

	qi_submit_sync(&desc, iommu);
}

/*
 * Disable Queued Invalidation interface.
 */
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	spin_lock_irqsave(&iommu->register_lock, flags);

	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable queued invalidation.
 */
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* write zero to the tail reg */
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	/* Make sure hardware complete it */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}

/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	/*
	 * queued invalidation is already setup and enabled.
	 */
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

	qi->desc = (void *)(get_zeroed_page(GFP_ATOMIC));
	if (!qi->desc) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	/* zeroed allocation so every slot starts out QI_FREE */
	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

	spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

/* iommu interrupt handling. Most stuff are MSI-like. */

enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
};

static const char *intr_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};
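/*
 * Map a hardware fault reason code to a human-readable string and its
 * fault type (DMA remapping vs. interrupt remapping).
 */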
const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(intr_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return intr_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

void dmar_msi_unmask(unsigned int irq)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	/* unmask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(unsigned int irq)
{
	unsigned long flag;
	struct intel_iommu *iommu = get_irq_data(irq);

	/* mask it */
	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG);
	/* Read a reg to force flush the post write */
	readl(iommu->reg + DMAR_FECTL_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + DMAR_FEDATA_REG);
	writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG);
	writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = get_irq_data(irq);
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + DMAR_FEDATA_REG);
	msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG);
	msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG);
	spin_unlock_irqrestore(&iommu->register_lock, flag);
}

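/* Decode and log a single primary fault record. */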
static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
		u8 fault_reason, u16 source_id, unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		printk(KERN_ERR "INTR-REMAP: Request device [%02x:%02x.%d] "
		       "fault index %llx\n"
			"INTR-REMAP:[fault reason %02d] %s\n",
			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
			PCI_FUNC(source_id & 0xFF), addr >> 48,
			fault_reason, reason);
	else
		printk(KERN_ERR
		       "DMAR:[%s] Request device [%02x:%02x.%d] "
		       "fault addr %llx\n"
		       "DMAR:[fault reason %02d] %s\n",
		       (type ? "DMA Read" : "DMA Write"),
		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

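/* Fault interrupt handler: drain and log all pending primary fault records. */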
#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;

	spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status)
		printk(KERN_ERR "DRHD: handling fault status reg %x\n",
		       fault_status);

	/* TBD: ignore advanced fault log currently */
	if (!(fault_status & DMA_FSTS_PPF))
		goto clear_rest;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

		/* highest 32 bits */
		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		fault_reason = dma_frcd_fault_reason(data);
		type = dma_frcd_type(data);

		data = readl(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN + 8);
		source_id = dma_frcd_source_id(data);

		guest_addr = dmar_readq(iommu->reg + reg +
				fault_index * PRIMARY_FAULT_REG_LEN);
		guest_addr = dma_frcd_page_addr(guest_addr);
		/* clear the fault */
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		spin_unlock_irqrestore(&iommu->register_lock, flag);

		dmar_fault_do_one(iommu, type, fault_reason,
				source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		spin_lock_irqsave(&iommu->register_lock, flag);
	}
clear_rest:
	/* clear all the other faults */
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	writel(fault_status, iommu->reg + DMAR_FSTS_REG);

	spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

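/* Allocate and wire up the fault-reporting MSI for one IOMMU. */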
int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

	/*
	 * Check if the fault interrupt is already initialized.
	 */
	if (iommu->irq)
		return 0;

	irq = create_irq();
	if (!irq) {
		printk(KERN_ERR "IOMMU: no free vectors\n");
		return -EINVAL;
	}

	set_irq_data(irq, iommu);
	iommu->irq = irq;

	ret = arch_setup_dmar_msi(irq);
	if (ret) {
		set_irq_data(irq, NULL);
		iommu->irq = 0;
		destroy_irq(irq);
		return ret;
	}

	ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
	if (ret)
		printk(KERN_ERR "IOMMU: can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;

	/*
	 * Enable fault control interrupt.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_set_interrupt(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable fault "
			       "interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}
	}

	return 0;
}

/*
 * Re-enable Queued Invalidation interface.
 */
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

	/*
	 * First disable queued invalidation.
	 */
	dmar_disable_qi(iommu);
	/*
	 * Then enable queued invalidation again. Since there is no pending
	 * invalidation requests now, it's safe to re-enable queued
	 * invalidation.
	 */
	__dmar_enable_qi(iommu);

	return 0;
}

/*
 * Check interrupt remapping support in DMAR table description.
 */
int dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;
	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}