// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]			= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]		= "direct-relaxable",
	[IOMMU_RESV_RESERVED]			= "reserved",
	[IOMMU_RESV_MSI]			= "msi",
	[IOMMU_RESV_SW_MSI]			= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

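/* Translate a domain type into a human-readable string for log messages */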
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu = param;
	return param;
}

static void dev_iommu_free(struct device *dev)
{
	iommu_fwspec_free(dev);
	kfree(dev->iommu);
	dev->iommu = NULL;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	WARN_ON(dev->iommu_group);
	if (!ops)
		return -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	if (!try_module_get(ops->owner)) {
		ret = -EINVAL;
		goto err_free_dev_param;
	}

	ret = ops->add_device(dev);
	if (ret)
		goto err_module_put;

	return 0;

err_module_put:
	module_put(ops->owner);
err_free_dev_param:
	dev_iommu_free(dev);
	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev->iommu_group)
		ops->remove_device(dev);

	if (dev->iommu) {
		module_put(ops->owner);
		dev_iommu_free(dev);
	}
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

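/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */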
int iommu_insert_resv_region(struct iommu_resv_region *new,
			     struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	/* First add the new element based on start address sorting */
	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	/* Merge overlapping segments of type nr->type in @regions, if any */
	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		/* no merge needed on elements of different types than @new */
		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		/* look for the last stack element of same type as @iter */
		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
						region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

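/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */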
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj as a proxy for the group.
	 */
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

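/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */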
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

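/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */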
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

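/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow groups to be named (ex. ACPI DMAR tables).  This is useful for
 * userspace tools to identify the group through sysfs.  An existing name
 * is replaced; passing NULL simply removes the current name.
 */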
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	/* We need to consider overlapping regions for different devices */
	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
				     struct device *dev)
{
	if (domain->ops->is_attach_deferred)
		return domain->ops->is_attach_deferred(domain, dev);

	return false;
}

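/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */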
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and repeat the name until we find a free one.
			 */
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

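/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */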
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

/*
 * Iterate over the group's devices.  Callers must already hold
 * group->mutex; iommu_group_for_each_dev() below takes it for them.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

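/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */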
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references
 * on an existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

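/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */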
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

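/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should
 * also complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting new faults
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */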
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

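/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded
 * IRQ handler. When this function fails and the fault is recoverable, it is
 * the caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */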
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool pasid_valid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct dev_iommu *param = dev->iommu;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

		if ((pasid_valid && prm->pasid != msg->pasid) ||
		    prm->grpid != msg->grpid)
			continue;

		/* Sanitize the reply */
		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

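/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */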
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

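/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */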
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, so the search space is quite
 * small.  A pair of multifunction devices could conceivably alias each
 * other, so a bitmap of visited devfns guards against looping.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and
 * return the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

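/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */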
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
			if (dom) {
				dev_warn(dev,
					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
					 iommu_def_domain_type);
			}
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;

		if (dom && !iommu_dma_strict) {
			int attr = 1;
			iommu_domain_set_attr(dom,
					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
					      &attr);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	int ret = iommu_probe_device(dev);

	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about
	 * other errors and fail to initialize when they happen.
	 */
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	/* Clean up */
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (ops == NULL) {
		bus->iommu_ops = NULL;
		return 0;
	}

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

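/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */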
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;
	/* Assume all sizes by default; the driver may override this later */
	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
			   struct iommu_cache_invalidate_info *inv_info)
{
	if (unlikely(!domain->ops->cache_invalidate))
		return -ENODEV;

	return domain->ops->cache_invalidate(domain, dev, inv_info);
}
EXPORT_SYMBOL_GPL(iommu_cache_invalidate);

int iommu_sva_bind_gpasid(struct iommu_domain *domain,
			  struct device *dev, struct iommu_gpasid_bind_data *data)
{
	if (unlikely(!domain->ops->sva_bind_gpasid))
		return -ENODEV;

	return domain->ops->sva_bind_gpasid(domain, dev, data);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
			    ioasid_t pasid)
{
	if (unlikely(!domain->ops->sva_unbind_gpasid))
		return -ENODEV;

	return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if (iommu_is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	/* Detach by re-attaching to the default domain */
	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);
		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int __iommu_map(struct iommu_domain *domain, unsigned long iova,
		phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);
		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);

		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain);

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	might_sleep();
	return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map);

int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_tlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		      struct scatterlist *sg, unsigned int nents, int prot,
		      gfp_t gfp)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	/* Loop runs to nents inclusive so the final contiguous run is mapped */
	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return 0;
}

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);

size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

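/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */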
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

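/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * for simple reservations. Memory allocated for each reserved region will be
 * freed. If an IOMMU driver allocates additional resources per region, it is
 * going to have to implement a custom callback.
 */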
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);

static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;
	int ret;

	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	ret = 0;
	if (group->default_domain && group->default_domain->type == type)
		goto out;

	/* Don't change mappings of existing devices */
	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	ret = -ENOMEM;
	domain = __iommu_domain_alloc(dev->bus, type);
	if (!domain)
		goto out;

	/* Attach the device to the domain */
	ret = __iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		goto out;
	}

	/* Make the domain the default for this group */
	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = domain;

	iommu_group_create_direct_mappings(group, dev);

	dev_info(dev, "Using iommu %s mapping\n",
		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
}

/* Request that a device can't be direct mapped by the IOMMU */
int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

/*
 * Per device IOMMU features.
 */
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_has_feat)
		return ops->dev_has_feat(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_enable_feat)
		return ops->dev_enable_feat(dev, feat);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_disable_feat)
		return ops->dev_disable_feat(dev, feat);

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_feat_enabled)
		return ops->dev_feat_enabled(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

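/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail anyway).
 */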
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

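/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to bind callback
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID. If a bond already exists between
 * @dev and @mm, it is returned and an additional reference is taken. Caller
 * must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */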
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

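/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */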
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);