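/*
 * Early detection and parsing of DMA remapping devices reported to the
 * OS through the BIOS via the DMA Remapping Reporting (DMAR) ACPI table.
 *
 * These routines are shared by the DMA-remapping and interrupt-remapping
 * code.
 */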
#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/intel-iommu.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>

#include "irq_remapping.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
	void			*arg[ACPI_DMAR_TYPE_RESERVED];
	bool			ignore_unhandled;
	bool			print_entry;
};
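
/*
 * Locking rules:
 * - dmar_global_lock protects updates to dmar_drhd_units and the
 *   per-unit device-scope arrays in process context;
 * - readers walk dmar_drhd_units and the device arrays under RCU.
 */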
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

extern const struct iommu_ops intel_iommu_ops;

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
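	/*
	 * Add the DRHD unit to the global list. INCLUDE_ALL units go to
	 * the tail so that a list walk finds more specific units first.
	 */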
	if (drhd->include_all)
		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
	else
		list_add_rcu(&drhd->list, &dmar_drhd_units);
}

void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
	struct acpi_dmar_device_scope *scope;

	*cnt = 0;
	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			(*cnt)++;
		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
			 scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
			pr_warn("Unsupported device scope\n");
		}
		start += scope->length;
	}
	if (*cnt == 0)
		return NULL;

	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
	int i;
	struct device *tmp_dev;

	if (*devices && *cnt) {
		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
			put_device(tmp_dev);
		kfree(*devices);
	}

	*devices = NULL;
	*cnt = 0;
}

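/* Static buffer keeps the common notifier path free of kzalloc()/kfree() */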
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
	int level = 0;
	size_t size;
	struct pci_dev *tmp;
	struct dmar_pci_notify_info *info;

	BUG_ON(dev->is_virtfn);
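
	/* Only compute the path[] depth for device-addition events */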
	if (event == BUS_NOTIFY_ADD_DEVICE)
		for (tmp = dev; tmp; tmp = tmp->bus->self)
			level++;

	size = struct_size(info, path, level);
	if (size <= sizeof(dmar_pci_notify_info_buf)) {
		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
	} else {
		info = kzalloc(size, GFP_KERNEL);
		if (!info) {
			pr_warn("Out of memory when allocating notify_info for %s.\n",
				pci_name(dev));
			if (dmar_dev_scope_status == 0)
				dmar_dev_scope_status = -ENOMEM;
			return NULL;
		}
	}

	info->event = event;
	info->dev = dev;
	info->seg = pci_domain_nr(dev->bus);
	info->level = level;
	if (event == BUS_NOTIFY_ADD_DEVICE) {
		for (tmp = dev; tmp; tmp = tmp->bus->self) {
			level--;
			info->path[level].bus = tmp->bus->number;
			info->path[level].device = PCI_SLOT(tmp->devfn);
			info->path[level].function = PCI_FUNC(tmp->devfn);
			if (pci_is_root_bus(tmp->bus))
				info->bus = tmp->bus->number;
		}
	}

	return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
	if ((void *)info != dmar_pci_notify_info_buf)
		kfree(info);
}

static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
				struct acpi_dmar_pci_path *path, int count)
{
	int i;

	if (info->bus != bus)
		goto fallback;
	if (info->level != count)
		goto fallback;

	for (i = 0; i < count; i++) {
		if (path[i].device != info->path[i].device ||
		    path[i].function != info->path[i].function)
			goto fallback;
	}

	return true;

fallback:

	if (count != 1)
		return false;

	i = info->level - 1;
	if (bus == info->path[i].bus &&
	    path[0].device == info->path[i].device &&
	    path[0].function == info->path[i].function) {
		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
			bus, path[0].device, path[0].function);
		return true;
	}

	return false;
}
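
/*
 * Return 1 if the device was recorded in @devices, 0 if this scope does
 * not cover it, or a negative errno on a device-scope type mismatch.
 */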
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
			  void *start, void *end, u16 segment,
			  struct dmar_dev_scope *devices,
			  int devices_cnt)
{
	int i, level;
	struct device *tmp, *dev = &info->dev->dev;
	struct acpi_dmar_device_scope *scope;
	struct acpi_dmar_pci_path *path;

	if (segment != info->seg)
		return 0;

	for (; start < end; start += scope->length) {
		scope = start;
		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
			continue;

		path = (struct acpi_dmar_pci_path *)(scope + 1);
		level = (scope->length - sizeof(*scope)) / sizeof(*path);
		if (!dmar_match_pci_path(info, scope->bus, path, level))
			continue;
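
		/*
		 * Endpoint-scope entries are expected to describe devices
		 * with normal PCI headers, and bridge-scope entries devices
		 * with bridge headers. An exception is made below for
		 * bridge-scope devices that have a normal header and class
		 * PCI_CLASS_BRIDGE_OTHER (e.g. PCI NTB devices).
		 */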
		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
		     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
		    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
		     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
		      info->dev->class >> 8 != PCI_CLASS_BRIDGE_OTHER))) {
			pr_warn("Device scope type does not match for %s\n",
				pci_name(info->dev));
			return -EINVAL;
		}

		for_each_dev_scope(devices, devices_cnt, i, tmp)
			if (tmp == NULL) {
				devices[i].bus = info->dev->bus->number;
				devices[i].devfn = info->dev->devfn;
				rcu_assign_pointer(devices[i].dev,
						   get_device(dev));
				return 1;
			}
		BUG_ON(i >= devices_cnt);
	}

	return 0;
}

int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
			  struct dmar_dev_scope *devices, int count)
{
	int index;
	struct device *tmp;

	if (info->seg != segment)
		return 0;

	for_each_active_dev_scope(devices, count, index, tmp)
		if (tmp == &info->dev->dev) {
			RCU_INIT_POINTER(devices[index].dev, NULL);
			synchronize_rcu();
			put_device(tmp);
			return 1;
		}

	return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
	int ret = 0;
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	for_each_drhd_unit(dmaru) {
		if (dmaru->include_all)
			continue;

		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit, header);
		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
				((void *)drhd) + drhd->header.length,
				dmaru->segment,
				dmaru->devices, dmaru->devices_cnt);
		if (ret)
			break;
	}
	if (ret >= 0)
		ret = dmar_iommu_notify_scope_dev(info);
	if (ret < 0 && dmar_dev_scope_status == 0)
		dmar_dev_scope_status = ret;

	return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
	struct dmar_drhd_unit *dmaru;

	for_each_drhd_unit(dmaru)
		if (dmar_remove_dev_scope(info, dmaru->segment,
					  dmaru->devices, dmaru->devices_cnt))
			break;
	dmar_iommu_notify_scope_dev(info);
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct pci_dev *pdev = to_pci_dev(data);
	struct dmar_pci_notify_info *info;
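
	/* Only physical functions matter here; VFs share the PF's scope */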
	if (pdev->is_virtfn)
		return NOTIFY_DONE;
	if (action != BUS_NOTIFY_ADD_DEVICE &&
	    action != BUS_NOTIFY_REMOVED_DEVICE)
		return NOTIFY_DONE;

	info = dmar_alloc_pci_notify_info(pdev, action);
	if (!info)
		return NOTIFY_DONE;

	down_write(&dmar_global_lock);
	if (action == BUS_NOTIFY_ADD_DEVICE)
		dmar_pci_bus_add_dev(info);
	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
		dmar_pci_bus_del_dev(info);
	up_write(&dmar_global_lock);

	dmar_free_pci_notify_info(info);

	return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
	.notifier_call = dmar_pci_bus_notifier,
	.priority = INT_MIN,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
	struct dmar_drhd_unit *dmaru;

	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
		if (dmaru->segment == drhd->segment &&
		    dmaru->reg_base_addr == drhd->address)
			return dmaru;

	return NULL;
}
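
/*
 * Parse one DMA remapping hardware unit definition (DRHD) structure and
 * register the corresponding dmar_drhd_unit, unless it is already known.
 */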
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct dmar_drhd_unit *dmaru;
	int ret;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	dmaru = dmar_find_dmaru(drhd);
	if (dmaru)
		goto out;

	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
	if (!dmaru)
		return -ENOMEM;
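
	/*
	 * Keep a private copy of the ACPI structure in the memory allocated
	 * right behind dmaru; the firmware table mapping may not stay
	 * around after detection.
	 */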
	dmaru->hdr = (void *)(dmaru + 1);
	memcpy(dmaru->hdr, header, header->length);
	dmaru->reg_base_addr = drhd->address;
	dmaru->segment = drhd->segment;
	dmaru->include_all = drhd->flags & 0x1;
	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
					      ((void *)drhd) + drhd->header.length,
					      &dmaru->devices_cnt);
	if (dmaru->devices_cnt && dmaru->devices == NULL) {
		kfree(dmaru);
		return -ENOMEM;
	}

	ret = alloc_iommu(dmaru);
	if (ret) {
		dmar_free_dev_scope(&dmaru->devices,
				    &dmaru->devices_cnt);
		kfree(dmaru);
		return ret;
	}
	dmar_register_drhd_unit(dmaru);

out:
	if (arg)
		(*(int *)arg)++;

	return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
	if (dmaru->devices && dmaru->devices_cnt)
		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
	if (dmaru->iommu)
		free_iommu(dmaru->iommu);
	kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
				      void *arg)
{
	struct acpi_dmar_andd *andd = (void *)header;

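	/* Check for NUL termination within the designated length */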
	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
			   dmi_get_system_info(DMI_BIOS_VENDOR),
			   dmi_get_system_info(DMI_BIOS_VERSION),
			   dmi_get_system_info(DMI_PRODUCT_VERSION));
		return -EINVAL;
	}
	pr_info("ANDD device: %x name: %s\n", andd->device_number,
		andd->device_name);

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
	struct acpi_dmar_rhsa *rhsa;
	struct dmar_drhd_unit *drhd;

	rhsa = (struct acpi_dmar_rhsa *)header;
	for_each_drhd_unit(drhd) {
		if (drhd->reg_base_addr == rhsa->base_address) {
			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);

			if (!node_online(node))
				node = NUMA_NO_NODE;
			drhd->iommu->node = node;
			return 0;
		}
	}
	/*
	 * Report the address from the RHSA entry itself; once the loop
	 * above terminates, @drhd no longer points at a valid unit.
	 */
	WARN_TAINT(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		rhsa->base_address,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));

	return 0;
}
#else
#define	dmar_parse_one_rhsa		dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_reserved_memory *rmrr;
	struct acpi_dmar_atsr *atsr;
	struct acpi_dmar_rhsa *rhsa;

	switch (header->type) {
	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
		drhd = container_of(header, struct acpi_dmar_hardware_unit,
				    header);
		pr_info("DRHD base: %#016Lx flags: %#x\n",
			(unsigned long long)drhd->address, drhd->flags);
		break;
	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
				    header);
		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
			(unsigned long long)rmrr->base_address,
			(unsigned long long)rmrr->end_address);
		break;
	case ACPI_DMAR_TYPE_ROOT_ATS:
		atsr = container_of(header, struct acpi_dmar_atsr, header);
		pr_info("ATSR flags: %#x\n", atsr->flags);
		break;
	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
			(unsigned long long)rhsa->base_address,
			rhsa->proximity_domain);
		break;
	case ACPI_DMAR_TYPE_NAMESPACE:
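		/* Printed in dmar_parse_one_andd() after sanity checking */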
		break;
	}
}

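/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */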
static int __init dmar_table_detect(void)
{
	acpi_status status = AE_OK;

	status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

	if (ACPI_SUCCESS(status) && !dmar_tbl) {
		pr_warn("Unable to map DMAR\n");
		status = AE_NOT_FOUND;
	}

	return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
				       size_t len, struct dmar_res_callback *cb)
{
	struct acpi_dmar_header *iter, *next;
	struct acpi_dmar_header *end = ((void *)start) + len;

	for (iter = start; iter < end; iter = next) {
		next = (void *)iter + iter->length;
		if (iter->length == 0) {
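			/* Avoid looping forever on bad ACPI tables */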
			pr_debug(FW_BUG "Invalid 0-length structure\n");
			break;
		} else if (next > end) {
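			/* Avoid passing table end */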
			pr_warn(FW_BUG "Record passes table end\n");
			return -EINVAL;
		}

		if (cb->print_entry)
			dmar_table_print_dmar_entry(iter);

		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
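			/* continue for forward compatibility */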
			pr_debug("Unknown DMAR structure type %d\n",
				 iter->type);
		} else if (cb->cb[iter->type]) {
			int ret;

			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
			if (ret)
				return ret;
		} else if (!cb->ignore_unhandled) {
			pr_warn("No handler for DMAR structure type %d\n",
				iter->type);
			return -EINVAL;
		}
	}

	return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
				       struct dmar_res_callback *cb)
{
	return dmar_walk_remapping_entries((void *)(dmar + 1),
			dmar->header.length - sizeof(*dmar), cb);
}

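/*
 * parse_dmar_table - parses the DMA reporting table
 */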
static int __init
parse_dmar_table(void)
{
	struct acpi_table_dmar *dmar;
	int drhd_count = 0;
	int ret;
	struct dmar_res_callback cb = {
		.print_entry = true,
		.ignore_unhandled = true,
		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
	};

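	/*
	 * Run the detection again; the earlier dmar_tbl mapping may have
	 * used a fixed map.
	 */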
	dmar_table_detect();

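	/*
	 * ACPI tables may not be DMA protected by tboot, so use the tboot
	 * copy of the DMAR table if one is provided.
	 */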
	dmar_tbl = tboot_get_dmar_table(dmar_tbl);

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return -ENODEV;

	if (dmar->width < PAGE_SHIFT - 1) {
		pr_warn("Invalid DMAR haw\n");
		return -EINVAL;
	}

	pr_info("Host address width %d\n", dmar->width + 1);
	ret = dmar_walk_dmar_table(dmar, &cb);
	if (ret == 0 && drhd_count == 0)
		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

	return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
				 int cnt, struct pci_dev *dev)
{
	int index;
	struct device *tmp;

	while (dev) {
		for_each_active_dev_scope(devices, cnt, index, tmp)
			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
				return 1;

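		/* Check our parent */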
		dev = dev->bus->self;
	}

	return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;

	dev = pci_physfn(dev);

	rcu_read_lock();
	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		if (dmaru->include_all &&
		    drhd->segment == pci_domain_nr(dev->bus))
			goto out;

		if (dmar_pci_device_match(dmaru->devices,
					  dmaru->devices_cnt, dev))
			goto out;
	}
	dmaru = NULL;
out:
	rcu_read_unlock();

	return dmaru;
}

static void __init dmar_acpi_insert_dev_scope(u8 device_number,
					      struct acpi_device *adev)
{
	struct dmar_drhd_unit *dmaru;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	struct device *tmp;
	int i;
	struct acpi_dmar_pci_path *path;

	for_each_drhd_unit(dmaru) {
		drhd = container_of(dmaru->hdr,
				    struct acpi_dmar_hardware_unit,
				    header);

		for (scope = (void *)(drhd + 1);
		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
		     scope = ((void *)scope) + scope->length) {
			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
				continue;
			if (scope->enumeration_id != device_number)
				continue;

			path = (void *)(scope + 1);
			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
				dev_name(&adev->dev), dmaru->reg_base_addr,
				scope->bus, path->device, path->function);
			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
				if (tmp == NULL) {
					dmaru->devices[i].bus = scope->bus;
					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
									    path->function);
					rcu_assign_pointer(dmaru->devices[i].dev,
							   get_device(&adev->dev));
					return;
				}
			BUG_ON(i >= dmaru->devices_cnt);
		}
	}
	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
		device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
	struct acpi_dmar_andd *andd;

	if (dmar_tbl == NULL)
		return -ENODEV;

	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
	     andd = ((void *)andd) + andd->header.length) {
		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
			acpi_handle h;
			struct acpi_device *adev;

			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
							  andd->device_name,
							  &h))) {
				pr_err("Failed to find handle for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			if (acpi_bus_get_device(h, &adev)) {
				pr_err("Failed to get device for ACPI object %s\n",
				       andd->device_name);
				continue;
			}
			dmar_acpi_insert_dev_scope(andd->device_number, adev);
		}
	}
	return 0;
}

int __init dmar_dev_scope_init(void)
{
	struct pci_dev *dev = NULL;
	struct dmar_pci_notify_info *info;

	if (dmar_dev_scope_status != 1)
		return dmar_dev_scope_status;

	if (list_empty(&dmar_drhd_units)) {
		dmar_dev_scope_status = -ENODEV;
	} else {
		dmar_dev_scope_status = 0;

		dmar_acpi_dev_scope_init();

		for_each_pci_dev(dev) {
			if (dev->is_virtfn)
				continue;

			info = dmar_alloc_pci_notify_info(dev,
					BUS_NOTIFY_ADD_DEVICE);
			if (!info) {
				return dmar_dev_scope_status;
			} else {
				dmar_pci_bus_add_dev(info);
				dmar_free_pci_notify_info(info);
			}
		}
	}

	return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}

int __init dmar_table_init(void)
{
	static int dmar_table_initialized;
	int ret;

	if (dmar_table_initialized == 0) {
		ret = parse_dmar_table();
		if (ret < 0) {
			if (ret != -ENODEV)
				pr_info("Parse DMAR table failure.\n");
		} else if (list_empty(&dmar_drhd_units)) {
			pr_info("No DMAR devices found\n");
			ret = -ENODEV;
		}

		if (ret < 0)
			dmar_table_initialized = ret;
		else
			dmar_table_initialized = 1;
	}

	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
	WARN_TAINT_ONCE(
		1, TAINT_FIRMWARE_WORKAROUND,
		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		addr, message,
		dmi_get_system_info(DMI_BIOS_VENDOR),
		dmi_get_system_info(DMI_BIOS_VERSION),
		dmi_get_system_info(DMI_PRODUCT_VERSION));
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
	struct acpi_dmar_hardware_unit *drhd;
	void __iomem *addr;
	u64 cap, ecap;

	drhd = (void *)entry;
	if (!drhd->address) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	if (arg)
		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
	else
		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
	if (!addr) {
		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
		return -EINVAL;
	}

	cap = dmar_readq(addr + DMAR_CAP_REG);
	ecap = dmar_readq(addr + DMAR_ECAP_REG);

	if (arg)
		iounmap(addr);
	else
		early_iounmap(addr, VTD_PAGE_SIZE);

	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
		warn_invalid_dmar(drhd->address, " returns all ones");
		return -EINVAL;
	}

	return 0;
}

int __init detect_intel_iommu(void)
{
	int ret;
	struct dmar_res_callback validate_drhd_cb = {
		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
		.ignore_unhandled = true,
	};

	down_write(&dmar_global_lock);
	ret = dmar_table_detect();
	if (!ret)
		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
					   &validate_drhd_cb);
	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
		iommu_detected = 1;
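		/* Make sure ACS will be enabled */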
		pci_request_acs();
	}

#ifdef CONFIG_X86
	if (!ret)
		x86_init.iommu.iommu_init = intel_iommu_init;
#endif

	if (dmar_tbl) {
		acpi_put_table(dmar_tbl);
		dmar_tbl = NULL;
	}
	up_write(&dmar_global_lock);

	return ret ? ret : 1;
}

static void unmap_iommu(struct intel_iommu *iommu)
{
	iounmap(iommu->reg);
	release_mem_region(iommu->reg_phys, iommu->reg_size);
}

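/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @phys_addr: the physical address of the base register
 *
 * Memory map the iommu's registers.  Start with a single page, and
 * possibly expand if that turns out to be insufficient.
 */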
static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
{
	int map_size, err = 0;

	iommu->reg_phys = phys_addr;
	iommu->reg_size = VTD_PAGE_SIZE;

	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
		pr_err("Can't reserve memory\n");
		err = -EBUSY;
		goto out;
	}

	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
	if (!iommu->reg) {
		pr_err("Can't map the region\n");
		err = -ENOMEM;
		goto release;
	}

	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
		err = -EINVAL;
		warn_invalid_dmar(phys_addr, " returns all ones");
		goto unmap;
	}

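	/* the registers might be more than one page */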
	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
			 cap_max_fault_reg_offset(iommu->cap));
	map_size = VTD_PAGE_ALIGN(map_size);
	if (map_size > iommu->reg_size) {
		iounmap(iommu->reg);
		release_mem_region(iommu->reg_phys, iommu->reg_size);
		iommu->reg_size = map_size;
		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
					iommu->name)) {
			pr_err("Can't reserve memory\n");
			err = -EBUSY;
			goto out;
		}
		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
		if (!iommu->reg) {
			pr_err("Can't map the region\n");
			err = -ENOMEM;
			goto release;
		}
	}
	err = 0;
	goto out;

unmap:
	iounmap(iommu->reg);
release:
	release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
	return err;
}

static int dmar_alloc_seq_id(struct intel_iommu *iommu)
{
	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
					    DMAR_UNITS_SUPPORTED);
	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
		iommu->seq_id = -1;
	} else {
		set_bit(iommu->seq_id, dmar_seq_ids);
		sprintf(iommu->name, "dmar%d", iommu->seq_id);
	}

	return iommu->seq_id;
}

static void dmar_free_seq_id(struct intel_iommu *iommu)
{
	if (iommu->seq_id >= 0) {
		clear_bit(iommu->seq_id, dmar_seq_ids);
		iommu->seq_id = -1;
	}
}

static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = 0;
	int msagaw = 0;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

	err = -EINVAL;
	agaw = iommu_calculate_agaw(iommu);
	if (agaw < 0) {
		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	msagaw = iommu_calculate_max_sagaw(iommu);
	if (msagaw < 0) {
		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
		       iommu->seq_id);
		goto err_unmap;
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

	iommu->node = NUMA_NO_NODE;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

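	/* Reflect the status of any pre-enabled features in gcmd */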
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

	raw_spin_lock_init(&iommu->register_lock);

	if (intel_iommu_enabled) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);

		err = iommu_device_register(&iommu->iommu);
		if (err)
			goto err_unmap;
	}

	drhd->iommu = iommu;

	return 0;

err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}

static void free_iommu(struct intel_iommu *iommu)
{
	if (intel_iommu_enabled) {
		iommu_device_unregister(&iommu->iommu);
		iommu_device_sysfs_remove(&iommu->iommu);
	}

	if (iommu->irq) {
		if (iommu->pr_irq) {
			free_irq(iommu->pr_irq, iommu);
			dmar_free_hwirq(iommu->pr_irq);
			iommu->pr_irq = 0;
		}
		free_irq(iommu->irq, iommu);
		dmar_free_hwirq(iommu->irq);
		iommu->irq = 0;
	}

	if (iommu->qi) {
		free_page((unsigned long)iommu->qi->desc);
		kfree(iommu->qi->desc_status);
		kfree(iommu->qi);
	}

	if (iommu->reg)
		unmap_iommu(iommu);

	dmar_free_seq_id(iommu);
	kfree(iommu);
}

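/*
 * Reclaim all the submitted descriptors which have completed their work.
 */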
static inline void reclaim_free_desc(struct q_inval *qi)
{
	while (qi->desc_status[qi->free_tail] == QI_DONE ||
	       qi->desc_status[qi->free_tail] == QI_ABORT) {
		qi->desc_status[qi->free_tail] = QI_FREE;
		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
		qi->free_cnt++;
	}
}

static int qi_check_fault(struct intel_iommu *iommu, int index)
{
	u32 fault;
	int head, tail;
	struct q_inval *qi = iommu->qi;
	int wait_index = (index + 1) % QI_LENGTH;
	int shift = qi_shift(iommu);

	if (qi->desc_status[wait_index] == QI_ABORT)
		return -EAGAIN;

	fault = readl(iommu->reg + DMAR_FSTS_REG);
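
	/*
	 * If IQE happens, the head points to the descriptor associated
	 * with the error. No new descriptors are fetched until the IQE
	 * is cleared.
	 */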
	if (fault & DMA_FSTS_IQE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		if ((head >> shift) == index) {
			struct qi_desc *desc = qi->desc + head;

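			/*
			 * desc->qw2 and desc->qw3 are either reserved or
			 * used by software as private data, so don't print
			 * them out.
			 */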
			pr_err("VT-d detected invalid descriptor: qw0 = %llx, qw1 = %llx\n",
			       (unsigned long long)desc->qw0,
			       (unsigned long long)desc->qw1);
			memcpy(desc, qi->desc + (wait_index << shift),
			       1 << shift);
			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
			return -EINVAL;
		}
	}

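	/*
	 * If ITE happens, all pending wait_desc commands are aborted.
	 * No new descriptors are fetched until the ITE is cleared.
	 */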
	if (fault & DMA_FSTS_ITE) {
		head = readl(iommu->reg + DMAR_IQH_REG);
		head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH;
		head |= 1;
		tail = readl(iommu->reg + DMAR_IQT_REG);
		tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH;

		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);

		do {
			if (qi->desc_status[head] == QI_IN_USE)
				qi->desc_status[head] = QI_ABORT;
			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
		} while (head != tail);

		if (qi->desc_status[wait_index] == QI_ABORT)
			return -EAGAIN;
	}

	if (fault & DMA_FSTS_ICE)
		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);

	return 0;
}

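/*
 * Submit the queued invalidation descriptor to the remapping
 * hardware unit and wait for its completion.
 */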
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
	int rc;
	struct q_inval *qi = iommu->qi;
	int offset, shift, length;
	struct qi_desc wait_desc;
	int wait_index, index;
	unsigned long flags;

	if (!qi)
		return 0;

restart:
	rc = 0;

	raw_spin_lock_irqsave(&qi->q_lock, flags);
	while (qi->free_cnt < 3) {
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		cpu_relax();
		raw_spin_lock_irqsave(&qi->q_lock, flags);
	}

	index = qi->free_head;
	wait_index = (index + 1) % QI_LENGTH;
	shift = qi_shift(iommu);
	length = 1 << shift;

	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;

	offset = index << shift;
	memcpy(qi->desc + offset, desc, length);
	wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
	wait_desc.qw1 = virt_to_phys(&qi->desc_status[wait_index]);
	wait_desc.qw2 = 0;
	wait_desc.qw3 = 0;

	offset = wait_index << shift;
	memcpy(qi->desc + offset, &wait_desc, length);

	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
	qi->free_cnt -= 2;

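	/*
	 * Update the HW tail register indicating the presence of
	 * new descriptors.
	 */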
	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);

	while (qi->desc_status[wait_index] != QI_DONE) {
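		/*
		 * Leave interrupts disabled here, so that interrupt
		 * context cannot queue another command while one is
		 * already submitted and waiting for completion on this
		 * CPU. Otherwise the interrupt context could wait
		 * indefinitely for free slots in the queue.
		 */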
		rc = qi_check_fault(iommu, index);
		if (rc)
			break;

		raw_spin_unlock(&qi->q_lock);
		cpu_relax();
		raw_spin_lock(&qi->q_lock);
	}

	qi->desc_status[index] = QI_DONE;

	reclaim_free_desc(qi);
	raw_spin_unlock_irqrestore(&qi->q_lock, flags);

	if (rc == -EAGAIN)
		goto restart;

	return rc;
}
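
/*
 * Flush the global interrupt entry cache.
 */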
void qi_global_iec(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.qw0 = QI_IEC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

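	/* should never fail */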
	qi_submit_sync(&desc, iommu);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
		      u64 type)
{
	struct qi_desc desc;

	desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
			| QI_CC_GRAN(type) | QI_CC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
		    unsigned int size_order, u64 type)
{
	u8 dw = 0, dr = 0;

	struct qi_desc desc;
	int ih = 0;

	if (cap_write_drain(iommu->cap))
		dw = 1;

	if (cap_read_drain(iommu->cap))
		dr = 1;

	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
	desc.qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
		| QI_IOTLB_AM(size_order);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask)
{
	struct qi_desc desc;

	if (mask) {
		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
	} else
		desc.qw1 = QI_DEV_IOTLB_ADDR(addr);

	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
		qdep = 0;

	desc.qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(&desc, iommu);
}

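/*
 * Disable Queued Invalidation interface.
 */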
void dmar_disable_qi(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;
	cycles_t start_time = get_cycles();

	if (!ecap_qis(iommu->ecap))
		return;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_QIES))
		goto end;

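	/*
	 * Give a chance to HW to complete the pending invalidation requests.
	 */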
	while ((readl(iommu->reg + DMAR_IQT_REG) !=
		readl(iommu->reg + DMAR_IQH_REG)) &&
	       (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
		cpu_relax();

	iommu->gcmd &= ~DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      !(sts & DMA_GSTS_QIES), sts);
end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

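/*
 * Enable queued invalidation.
 */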
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
	u32 sts;
	unsigned long flags;
	struct q_inval *qi = iommu->qi;
	u64 val = virt_to_phys(qi->desc);

	qi->free_head = qi->free_tail = 0;
	qi->free_cnt = QI_LENGTH;

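	/*
	 * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability
	 * is present.
	 */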
	if (ecap_smts(iommu->ecap))
		val |= (1 << 11) | 1;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

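	/* write zero to the tail reg */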
	writel(0, iommu->reg + DMAR_IQT_REG);

	dmar_writeq(iommu->reg + DMAR_IQA_REG, val);

	iommu->gcmd |= DMA_GCMD_QIE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

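	/* Make sure hardware completes it */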
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

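/*
 * Enable Queued Invalidation interface. This is a must to support
 * interrupt-remapping. Also used by DMA-remapping, which replaces
 * register based IOTLB invalidation.
 */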
int dmar_enable_qi(struct intel_iommu *iommu)
{
	struct q_inval *qi;
	struct page *desc_page;

	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

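	/*
	 * queued invalidation is already set up and enabled.
	 */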
	if (iommu->qi)
		return 0;

	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
	if (!iommu->qi)
		return -ENOMEM;

	qi = iommu->qi;

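	/*
	 * Need two pages to accommodate 256 descriptors of 256 bits each
	 * if the remapping hardware supports scalable mode translation.
	 */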
	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
				     !!ecap_smts(iommu->ecap));
	if (!desc_page) {
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	qi->desc = page_address(desc_page);

	qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
	if (!qi->desc_status) {
		free_page((unsigned long) qi->desc);
		kfree(qi);
		iommu->qi = NULL;
		return -ENOMEM;
	}

	raw_spin_lock_init(&qi->q_lock);

	__dmar_enable_qi(iommu);

	return 0;
}

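/* iommu interrupt handling. Most stuff are MSI-like. */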
enum faulttype {
	DMA_REMAP,
	INTR_REMAP,
	UNKNOWN,
};

static const char *dma_remap_fault_reasons[] =
{
	"Software",
	"Present bit in root entry is clear",
	"Present bit in context entry is clear",
	"Invalid context entry",
	"Access beyond MGAW",
	"PTE Write access is not set",
	"PTE Read access is not set",
	"Next page table ptr is invalid",
	"Root table address invalid",
	"Context table ptr is invalid",
	"non-zero reserved fields in RTP",
	"non-zero reserved fields in CTP",
	"non-zero reserved fields in PTE",
	"PCE for translation request specifies blocking",
};

static const char *irq_remap_fault_reasons[] =
{
	"Detected reserved fields in the decoded interrupt-remapped request",
	"Interrupt index exceeded the interrupt-remapping table size",
	"Present field in the IRTE entry is clear",
	"Error accessing interrupt-remapping table pointed by IRTA_REG",
	"Detected reserved fields in the IRTE entry",
	"Blocked a compatibility format interrupt request",
	"Blocked an interrupt request due to source-id verification failure",
};

static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
				     ARRAY_SIZE(irq_remap_fault_reasons))) {
		*fault_type = INTR_REMAP;
		return irq_remap_fault_reasons[fault_reason - 0x20];
	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
		*fault_type = DMA_REMAP;
		return dma_remap_fault_reasons[fault_reason];
	} else {
		*fault_type = UNKNOWN;
		return "Unknown";
	}
}

static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
{
	if (iommu->irq == irq)
		return DMAR_FECTL_REG;
	else if (iommu->pr_irq == irq)
		return DMAR_PECTL_REG;
	else
		BUG();
}

void dmar_msi_unmask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;
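
	/* unmask it */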
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(0, iommu->reg + reg);
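	/* Read a reg to force flush the post write */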
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_mask(struct irq_data *data)
{
	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
	int reg = dmar_msi_reg(iommu, data->irq);
	unsigned long flag;

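	/* mask it */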
	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(DMA_FECTL_IM, iommu->reg + reg);
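	/* Read a reg to force flush the post write */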
	readl(iommu->reg + reg);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_write(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	writel(msg->data, iommu->reg + reg + 4);
	writel(msg->address_lo, iommu->reg + reg + 8);
	writel(msg->address_hi, iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

void dmar_msi_read(int irq, struct msi_msg *msg)
{
	struct intel_iommu *iommu = irq_get_handler_data(irq);
	int reg = dmar_msi_reg(iommu, irq);
	unsigned long flag;

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	msg->data = readl(iommu->reg + reg + 4);
	msg->address_lo = readl(iommu->reg + reg + 8);
	msg->address_hi = readl(iommu->reg + reg + 12);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
			     u8 fault_reason, u16 source_id,
			     unsigned long long addr)
{
	const char *reason;
	int fault_type;

	reason = dmar_get_fault_reason(fault_reason, &fault_type);

	if (fault_type == INTR_REMAP)
		pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr >> 48,
		       fault_reason, reason);
	else
		pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
		       type ? "DMA Read" : "DMA Write",
		       source_id >> 8, PCI_SLOT(source_id & 0xFF),
		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
	return 0;
}

#define PRIMARY_FAULT_REG_LEN (16)
irqreturn_t dmar_fault(int irq, void *dev_id)
{
	struct intel_iommu *iommu = dev_id;
	int reg, fault_index;
	u32 fault_status;
	unsigned long flag;
	static DEFINE_RATELIMIT_STATE(rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	raw_spin_lock_irqsave(&iommu->register_lock, flag);
	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
	if (fault_status && __ratelimit(&rs))
		pr_err("DRHD: handling fault status reg %x\n", fault_status);

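	/* TBD: ignore advanced fault log currently */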
	if (!(fault_status & DMA_FSTS_PPF))
		goto unlock_exit;

	fault_index = dma_fsts_fault_record_index(fault_status);
	reg = cap_fault_reg_offset(iommu->cap);
	while (1) {
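		/* Disable printing, simply clear the fault when ratelimited */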
		bool ratelimited = !__ratelimit(&rs);
		u8 fault_reason;
		u16 source_id;
		u64 guest_addr;
		int type;
		u32 data;

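		/* highest 32 bits */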
		data = readl(iommu->reg + reg +
			     fault_index * PRIMARY_FAULT_REG_LEN + 12);
		if (!(data & DMA_FRCD_F))
			break;

		if (!ratelimited) {
			fault_reason = dma_frcd_fault_reason(data);
			type = dma_frcd_type(data);

			data = readl(iommu->reg + reg +
				     fault_index * PRIMARY_FAULT_REG_LEN + 8);
			source_id = dma_frcd_source_id(data);

			guest_addr = dmar_readq(iommu->reg + reg +
					fault_index * PRIMARY_FAULT_REG_LEN);
			guest_addr = dma_frcd_page_addr(guest_addr);
		}

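		/* clear the fault */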
		writel(DMA_FRCD_F, iommu->reg + reg +
			fault_index * PRIMARY_FAULT_REG_LEN + 12);

		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);

		if (!ratelimited)
			dmar_fault_do_one(iommu, type, fault_reason,
					  source_id, guest_addr);

		fault_index++;
		if (fault_index >= cap_num_fault_regs(iommu->cap))
			fault_index = 0;
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
	}

	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
	       iommu->reg + DMAR_FSTS_REG);

unlock_exit:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
	return IRQ_HANDLED;
}

int dmar_set_interrupt(struct intel_iommu *iommu)
{
	int irq, ret;

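	/*
	 * Check if the fault interrupt is already initialized.
	 */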
	if (iommu->irq)
		return 0;

	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
	if (irq > 0) {
		iommu->irq = irq;
	} else {
		pr_err("No free IRQ vectors\n");
		return -EINVAL;
	}

	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
	if (ret)
		pr_err("Can't request irq\n");
	return ret;
}

int __init enable_drhd_fault_handling(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

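	/*
	 * Enable fault control interrupt.
	 */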
	for_each_iommu(iommu, drhd) {
		u32 fault_status;
		int ret = dmar_set_interrupt(iommu);

		if (ret) {
			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
			       (unsigned long long)drhd->reg_base_addr, ret);
			return -1;
		}

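		/*
		 * Clear any previous faults.
		 */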
		dmar_fault(iommu->irq, iommu);
		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
	}

	return 0;
}

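/*
 * Re-enable Queued Invalidation interface.
 */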
int dmar_reenable_qi(struct intel_iommu *iommu)
{
	if (!ecap_qis(iommu->ecap))
		return -ENOENT;

	if (!iommu->qi)
		return -ENOENT;

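	/*
	 * First disable queued invalidation.
	 */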
	dmar_disable_qi(iommu);

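	/*
	 * Then enable queued invalidation again. Since there are no
	 * pending invalidation requests now, it's safe to re-enable it.
	 */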
	__dmar_enable_qi(iommu);

	return 0;
}

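/*
 * Check interrupt remapping support in the DMAR table description.
 */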
int __init dmar_ir_support(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar)
		return 0;
	return dmar->flags & 0x1;
}

static inline bool dmar_in_use(void)
{
	return irq_remapping_enabled || intel_iommu_enabled;
}

static int __init dmar_free_unused_resources(void)
{
	struct dmar_drhd_unit *dmaru, *dmaru_n;

	if (dmar_in_use())
		return 0;

	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);

	down_write(&dmar_global_lock);
	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
		list_del(&dmaru->list);
		dmar_free_drhd(dmaru);
	}
	up_write(&dmar_global_lock);

	return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);

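/*
 * DMAR Hotplug Support
 * For more details, please refer to the Intel(R) Virtualization Technology
 * for Directed-IO Architecture Specification, section "Remapping Hardware
 * Unit Hot Plug".
 */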
static guid_t dmar_hp_guid =
	GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B,
		  0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF);

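/*
 * Currently there's only one revision and BIOS will not check the revision
 * id, so use 0 for safety.
 */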
#define	DMAR_DSM_REV_ID			0
#define	DMAR_DSM_FUNC_DRHD		1
#define	DMAR_DSM_FUNC_ATSR		2
#define	DMAR_DSM_FUNC_RHSA		3

static inline bool dmar_detect_dsm(acpi_handle handle, int func)
{
	return acpi_check_dsm(handle, &dmar_hp_guid, DMAR_DSM_REV_ID, 1 << func);
}

static int dmar_walk_dsm_resource(acpi_handle handle, int func,
				  dmar_res_handler_t handler, void *arg)
{
	int ret = -ENODEV;
	union acpi_object *obj;
	struct acpi_dmar_header *start;
	struct dmar_res_callback callback;
	static int res_type[] = {
		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
	};

	if (!dmar_detect_dsm(handle, func))
		return 0;

	obj = acpi_evaluate_dsm_typed(handle, &dmar_hp_guid, DMAR_DSM_REV_ID,
				      func, NULL, ACPI_TYPE_BUFFER);
	if (!obj)
		return -ENODEV;

	memset(&callback, 0, sizeof(callback));
	callback.cb[res_type[func]] = handler;
	callback.arg[res_type[func]] = arg;
	start = (struct acpi_dmar_header *)obj->buffer.pointer;
	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);

	ACPI_FREE(obj);

	return ret;
}

static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
{
	int ret;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return -ENODEV;

	ret = dmar_ir_hotplug(dmaru, true);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, true);

	return ret;
}

static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
{
	int i, ret;
	struct device *dev;
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (!dmaru)
		return 0;

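	/*
	 * All PCI devices managed by this unit should have been destroyed.
	 */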
	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
		for_each_active_dev_scope(dmaru->devices,
					  dmaru->devices_cnt, i, dev)
			return -EBUSY;
	}

	ret = dmar_ir_hotplug(dmaru, false);
	if (ret == 0)
		ret = dmar_iommu_hotplug(dmaru, false);

	return ret;
}

static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
{
	struct dmar_drhd_unit *dmaru;

	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
	if (dmaru) {
		list_del_rcu(&dmaru->list);
		synchronize_rcu();
		dmar_free_drhd(dmaru);
	}

	return 0;
}

static int dmar_hotplug_insert(acpi_handle handle)
{
	int ret;
	int drhd_count = 0;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_validate_one_drhd, (void *)1);
	if (ret)
		goto out;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_parse_one_drhd, (void *)&drhd_count);
	if (ret == 0 && drhd_count == 0) {
		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
		goto out;
	} else if (ret) {
		goto release_drhd;
	}

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
				     &dmar_parse_one_rhsa, NULL);
	if (ret)
		goto release_drhd;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_parse_one_atsr, NULL);
	if (ret)
		goto release_atsr;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_add_drhd, NULL);
	if (!ret)
		return 0;

	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_remove_drhd, NULL);
release_atsr:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
			       &dmar_release_one_atsr, NULL);
release_drhd:
	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
			       &dmar_hp_release_drhd, NULL);
out:
	return ret;
}

static int dmar_hotplug_remove(acpi_handle handle)
{
	int ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
				     &dmar_check_one_atsr, NULL);
	if (ret)
		return ret;

	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				     &dmar_hp_remove_drhd, NULL);
	if (ret == 0) {
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
					       &dmar_release_one_atsr, NULL));
		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
					       &dmar_hp_release_drhd, NULL));
	} else {
		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
				       &dmar_hp_add_drhd, NULL);
	}

	return ret;
}

static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
				       void *context, void **retval)
{
	acpi_handle *phdl = retval;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		*phdl = handle;
		return AE_CTRL_TERMINATE;
	}

	return AE_OK;
}

static int dmar_device_hotplug(acpi_handle handle, bool insert)
{
	int ret;
	acpi_handle tmp = NULL;
	acpi_status status;

	if (!dmar_in_use())
		return 0;

	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
		tmp = handle;
	} else {
		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
					     ACPI_UINT32_MAX,
					     dmar_get_dsm_handle,
					     NULL, NULL, &tmp);
		if (ACPI_FAILURE(status)) {
			pr_warn("Failed to locate _DSM method.\n");
			return -ENXIO;
		}
	}
	if (tmp == NULL)
		return 0;

	down_write(&dmar_global_lock);
	if (insert)
		ret = dmar_hotplug_insert(tmp);
	else
		ret = dmar_hotplug_remove(tmp);
	up_write(&dmar_global_lock);

	return ret;
}

int dmar_device_add(acpi_handle handle)
{
	return dmar_device_hotplug(handle, true);
}

int dmar_device_remove(acpi_handle handle)
{
	return dmar_device_hotplug(handle, false);
}

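/*
 * dmar_platform_optin - Is %DMAR_PLATFORM_OPT_IN set in the DMAR table
 *
 * Returns true if the platform has %DMAR_PLATFORM_OPT_IN set in the
 * DMAR table.
 */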
bool dmar_platform_optin(void)
{
	struct acpi_table_dmar *dmar;
	acpi_status status;
	bool ret;

	status = acpi_get_table(ACPI_SIG_DMAR, 0,
				(struct acpi_table_header **)&dmar);
	if (ACPI_FAILURE(status))
		return false;

	ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN);
	acpi_put_table((struct acpi_table_header *)dmar);

	return ret;
}
EXPORT_SYMBOL_GPL(dmar_platform_optin);