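// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */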
#define pr_fmt(fmt) "iommu: " fmt

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
        struct list_head entry;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]             = "direct",
        [IOMMU_RESV_DIRECT_RELAXABLE]   = "direct-relaxable",
        [IOMMU_RESV_RESERVED]           = "reserved",
        [IOMMU_RESV_MSI]                = "msi",
        [IOMMU_RESV_SW_MSI]             = "msi",
};

#define IOMMU_CMD_LINE_DMA_API          BIT(0)
#define IOMMU_CMD_LINE_STRICT           BIT(1)

static int iommu_alloc_default_domain(struct iommu_group *group,
                                      struct device *dev);
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);
static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev);
static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
                                      const char *buf, size_t count);

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

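/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */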
static const char *iommu_domain_type_str(unsigned int t)
{
        switch (t) {
        case IOMMU_DOMAIN_BLOCKED:
                return "Blocked";
        case IOMMU_DOMAIN_IDENTITY:
                return "Passthrough";
        case IOMMU_DOMAIN_UNMANAGED:
                return "Unmanaged";
        case IOMMU_DOMAIN_DMA:
        case IOMMU_DOMAIN_DMA_FQ:
                return "Translated";
        default:
                return "Unknown";
        }
}

static int __init iommu_subsys_init(void)
{
        if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
                        iommu_set_default_passthrough(false);
                else
                        iommu_set_default_translated(false);

                if (iommu_default_passthrough() && mem_encrypt_active()) {
                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
                        iommu_set_default_translated(false);
                }
        }

        if (!iommu_default_passthrough() && !iommu_dma_strict)
                iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;

        pr_info("Default domain type: %s %s\n",
                iommu_domain_type_str(iommu_def_domain_type),
                (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
                        "(set via kernel command line)" : "");

        if (!iommu_default_passthrough())
                pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
                        iommu_dma_strict ? "strict" : "lazy",
                        (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
                                "(set via kernel command line)" : "");

        return 0;
}
subsys_initcall(iommu_subsys_init);

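/**
 * iommu_device_register() - Register an IOMMU hardware instance
 * @iommu: IOMMU handle for the instance
 * @ops:   IOMMU ops to associate with the instance
 * @hwdev: (optional) actual instance device, used for fwnode lookup
 *
 * Return: 0 on success, or an error.
 */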
int iommu_device_register(struct iommu_device *iommu,
                          const struct iommu_ops *ops, struct device *hwdev)
{
        /* We need to be able to take module references appropriately */
        if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
                return -EINVAL;

        iommu->ops = ops;
        if (hwdev)
                iommu->fwnode = hwdev->fwnode;

        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register);

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);

static struct dev_iommu *dev_iommu_get(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu = param;
        return param;
}

static void dev_iommu_free(struct device *dev)
{
        iommu_fwspec_free(dev);
        kfree(dev->iommu);
        dev->iommu = NULL;
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_device *iommu_dev;
        struct iommu_group *group;
        int ret;

        if (!ops)
                return -ENODEV;

        if (!dev_iommu_get(dev))
                return -ENOMEM;

        if (!try_module_get(ops->owner)) {
                ret = -EINVAL;
                goto err_free;
        }

        iommu_dev = ops->probe_device(dev);
        if (IS_ERR(iommu_dev)) {
                ret = PTR_ERR(iommu_dev);
                goto out_module_put;
        }

        dev->iommu->iommu_dev = iommu_dev;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group)) {
                ret = PTR_ERR(group);
                goto out_release;
        }
        iommu_group_put(group);

        if (group_list && !group->default_domain && list_empty(&group->entry))
                list_add_tail(&group->entry, group_list);

        iommu_device_link(iommu_dev, dev);

        return 0;

out_release:
        ops->release_device(dev);

out_module_put:
        module_put(ops->owner);

err_free:
        dev_iommu_free(dev);

        return ret;
}

int iommu_probe_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        ret = __iommu_probe_device(dev, NULL);
        if (ret)
                goto err_out;

        group = iommu_group_get(dev);
        if (!group) {
                ret = -ENODEV;
                goto err_release;
        }

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver. There are still some drivers which don't
         * support default domains, so the return value is not yet
         * checked.
         */
        mutex_lock(&group->mutex);
        iommu_alloc_default_domain(group, dev);
        mutex_unlock(&group->mutex);

        if (group->default_domain) {
                ret = __iommu_attach_device(group->default_domain, dev);
                if (ret) {
                        iommu_group_put(group);
                        goto err_release;
                }
        }

        iommu_create_device_direct_mappings(group, dev);

        iommu_group_put(group);

        if (ops->probe_finalize)
                ops->probe_finalize(dev);

        return 0;

err_release:
        iommu_release_device(dev);

err_out:
        return ret;
}

void iommu_release_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (!dev->iommu)
                return;

        iommu_device_unlink(dev->iommu->iommu_dev, dev);

        ops->release_device(dev);

        iommu_group_remove_device(dev);
        module_put(ops->owner);
        dev_iommu_free(dev);
}

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        if (pt)
                iommu_set_default_passthrough(true);
        else
                iommu_set_default_translated(true);

        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
        int ret = kstrtobool(str, &iommu_dma_strict);

        if (!ret)
                iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
        return ret;
}
early_param("iommu.strict", iommu_dma_setup);

void iommu_set_dma_strict(void)
{
        iommu_dma_strict = true;
        if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
                iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

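/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */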
static int iommu_insert_resv_region(struct iommu_resv_region *new,
                                    struct list_head *regions)
{
        struct iommu_resv_region *iter, *tmp, *nr, *top;
        LIST_HEAD(stack);

        nr = iommu_alloc_resv_region(new->start, new->length,
                                     new->prot, new->type);
        if (!nr)
                return -ENOMEM;

        /* First add the elements ordered by start address */
        list_for_each_entry(iter, regions, list) {
                if (nr->start < iter->start ||
                    (nr->start == iter->start && nr->type <= iter->type))
                        break;
        }
        list_add_tail(&nr->list, &iter->list);

        /* Merge overlapping segments of type nr->type in @regions, if any */
        list_for_each_entry_safe(iter, tmp, regions, list) {
                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

                /* no merge needed on elements of different types than @new */
                if (iter->type != new->type) {
                        list_move_tail(&iter->list, &stack);
                        continue;
                }

                /* look for the last stack element of same type as @iter */
                list_for_each_entry_reverse(top, &stack, list)
                        if (top->type == iter->type)
                                goto check_overlap;

                list_move_tail(&iter->list, &stack);
                continue;

check_overlap:
                top_end = top->start + top->length - 1;

                if (iter->start > top_end + 1) {
                        list_move_tail(&iter->list, &stack);
                } else {
                        top->length = max(top_end, iter_end) - top->start + 1;
                        list_del(&iter->list);
                        kfree(iter);
                }
        }
        list_splice(&stack, regions);
        return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
                                 struct list_head *group_resv_regions)
{
        struct iommu_resv_region *entry;
        int ret = 0;

        list_for_each_entry(entry, dev_resv_regions, list) {
                ret = iommu_insert_resv_region(entry, group_resv_regions);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
{
        struct group_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                struct list_head dev_resv_regions;

                INIT_LIST_HEAD(&dev_resv_regions);
                iommu_get_resv_regions(device->dev, &dev_resv_regions);
                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
                iommu_put_resv_regions(device->dev, &dev_resv_regions);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
                                             char *buf)
{
        struct iommu_resv_region *region, *next;
        struct list_head group_resv_regions;
        char *str = buf;

        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);

        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
                               (long long int)region->start,
                               (long long int)(region->start +
                                               region->length - 1),
                               iommu_group_resv_type_string[region->type]);
                kfree(region);
        }

        return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
                                     char *buf)
{
        char *type = "unknown\n";

        mutex_lock(&group->mutex);
        if (group->default_domain) {
                switch (group->default_domain->type) {
                case IOMMU_DOMAIN_BLOCKED:
                        type = "blocked\n";
                        break;
                case IOMMU_DOMAIN_IDENTITY:
                        type = "identity\n";
                        break;
                case IOMMU_DOMAIN_UNMANAGED:
                        type = "unmanaged\n";
                        break;
                case IOMMU_DOMAIN_DMA:
                        type = "DMA\n";
                        break;
                case IOMMU_DOMAIN_DMA_FQ:
                        type = "DMA-FQ\n";
                        break;
                }
        }
        mutex_unlock(&group->mutex);
        strcpy(buf, type);

        return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
                        iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
                        iommu_group_store_type);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

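/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the default owner and domain of the
 * devices which belong to the group.
 */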
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        INIT_LIST_HEAD(&group->entry);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kobject_put(&group->kobj);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        ret = iommu_group_create_file(group,
                                      &iommu_group_attr_reserved_regions);
        if (ret)
                return ERR_PTR(ret);

        ret = iommu_group_create_file(group, &iommu_group_attr_type);
        if (ret)
                return ERR_PTR(ret);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

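/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */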
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

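/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */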
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

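/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * show up in the "name" attribute file under the group.
 */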
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_create_device_direct_mappings(struct iommu_group *group,
                                               struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || !iommu_is_dma_domain(domain))
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_resv_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;
                size_t map_size = 0;

                if (domain->ops->apply_resv_region)
                        domain->ops->apply_resv_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                if (entry->type != IOMMU_RESV_DIRECT &&
                    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                for (addr = start; addr <= end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        if (addr == end)
                                goto map_end;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (!phys_addr) {
                                map_size += pg_size;
                                continue;
                        }

map_end:
                        if (map_size) {
                                ret = iommu_map(domain, addr - map_size,
                                                addr - map_size, map_size,
                                                entry->prot);
                                if (ret)
                                        goto out;
                                map_size = 0;
                        }
                }
        }

        iommu_flush_iotlb_all(domain);

out:
        iommu_put_resv_regions(dev, &mappings);

        return ret;
}

static bool iommu_is_attach_deferred(struct iommu_domain *domain,
                                     struct device *dev)
{
        if (domain->ops->is_attach_deferred)
                return domain->ops->is_attach_deferred(domain, dev);

        return false;
}

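/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference.
 */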
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        dev_info(dev, "Adding to iommu group %d\n", group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
        sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

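/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */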
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        if (!group)
                return;

        dev_info(dev, "Removing from iommu group %d\n", group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

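/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */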
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

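/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */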
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

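/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the group pointer.
 */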
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_ref_get);

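/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count reaches zero, the group is released.
 */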
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

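/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */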
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

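/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */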
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

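/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the
 * following response code:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting new faults
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */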
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);
        /* Only allow one fault handler registered for each device */
        if (param->fault_param) {
                ret = -EBUSY;
                goto done_unlock;
        }

        get_device(dev);
        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
        if (!param->fault_param) {
                put_device(dev);
                ret = -ENOMEM;
                goto done_unlock;
        }
        param->fault_param->handler = handler;
        param->fault_param->data = data;
        mutex_init(&param->fault_param->lock);
        INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

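/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */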
int iommu_unregister_device_fault_handler(struct device *dev)
{
        struct dev_iommu *param = dev->iommu;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);

        if (!param->fault_param)
                goto unlock;

        /* we cannot unregister handler if there are pending faults */
        if (!list_empty(&param->fault_param->faults)) {
                ret = -EBUSY;
                goto unlock;
        }

        kfree(param->fault_param);
        param->fault_param = NULL;
        put_device(dev);
unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

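/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */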
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        struct dev_iommu *param = dev->iommu;
        struct iommu_fault_event *evt_pending = NULL;
        struct iommu_fault_param *fparam;
        int ret = 0;

        if (!param || !evt)
                return -EINVAL;

        /* we only report device fault if there is a handler registered */
        mutex_lock(&param->lock);
        fparam = param->fault_param;
        if (!fparam || !fparam->handler) {
                ret = -EINVAL;
                goto done_unlock;
        }

        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
                                      GFP_KERNEL);
                if (!evt_pending) {
                        ret = -ENOMEM;
                        goto done_unlock;
                }
                mutex_lock(&fparam->lock);
                list_add_tail(&evt_pending->list, &fparam->faults);
                mutex_unlock(&fparam->lock);
        }

        ret = fparam->handler(&evt->fault, fparam->data);
        if (ret && evt_pending) {
                mutex_lock(&fparam->lock);
                list_del(&evt_pending->list);
                mutex_unlock(&fparam->lock);
                kfree(evt_pending);
        }
done_unlock:
        mutex_unlock(&param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
{
        bool needs_pasid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct dev_iommu *param = dev->iommu;
        bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain || !domain->ops->page_response)
                return -ENODEV;

        if (!param || !param->fault_param)
                return -EINVAL;

        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
                return -EINVAL;

        /* Only send response if there is a fault report pending */
        mutex_lock(&param->fault_param->lock);
        if (list_empty(&param->fault_param->faults)) {
                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
                goto done_unlock;
        }
        /*
         * Check if we have a matching page request pending to respond,
         * otherwise return -EINVAL
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
                if (prm->grpid != msg->grpid)
                        continue;

                /*
                 * If the PASID is required, the corresponding request is
                 * matched using the group ID, the PASID valid bit and the
                 * PASID value. Otherwise only the group ID matches request
                 * and response.
                 */
                needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
                if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
                        continue;

                if (!needs_pasid && has_pasid) {
                        /* No big deal, just clear it. */
                        msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
                        msg->pasid = 0;
                }

                ret = domain->ops->page_response(dev, evt, msg);
                list_del(&evt->list);
                kfree(evt);
                break;
        }

done_unlock:
        mutex_unlock(&param->fault_param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

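/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */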
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

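/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */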
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

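/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function of a multifunction device, use the sibling's group if it's
 * already been determined.
 */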
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

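/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at pcie
 * device, and therefore only expect multiple slots on the non-pcie side,
 * which has a maximum of 8 functions).
 */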
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

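/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */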
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

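/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */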
struct iommu_group *generic_device_group(struct device *dev)
{
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(generic_device_group);

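/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */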
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        return iommu_group_alloc();
}
EXPORT_SYMBOL_GPL(pci_device_group);

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
        struct device *cont_dev = fsl_mc_cont_dev(dev);
        struct iommu_group *group;

        group = iommu_group_get(cont_dev);
        if (!group)
                group = iommu_group_alloc();
        return group;
}
EXPORT_SYMBOL_GPL(fsl_mc_device_group);

static int iommu_get_def_domain_type(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
                return IOMMU_DOMAIN_DMA;

        if (ops->def_domain_type)
                return ops->def_domain_type(dev);

        return 0;
}

static int iommu_group_alloc_default_domain(struct bus_type *bus,
                                            struct iommu_group *group,
                                            unsigned int type)
{
        struct iommu_domain *dom;

        dom = __iommu_domain_alloc(bus, type);
        if (!dom && type != IOMMU_DOMAIN_DMA) {
                dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
                if (dom)
                        pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
                                type, group->name);
        }

        if (!dom)
                return -ENOMEM;

        group->default_domain = dom;
        if (!group->domain)
                group->domain = dom;
        return 0;
}

static int iommu_alloc_default_domain(struct iommu_group *group,
                                      struct device *dev)
{
        unsigned int type;

        if (group->default_domain)
                return 0;

        type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;

        return iommu_group_alloc_default_domain(dev->bus, group, type);
}

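/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */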
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        if (!ops)
                return ERR_PTR(-EINVAL);

        group = ops->device_group(dev);
        if (WARN_ON_ONCE(group == NULL))
                return ERR_PTR(-EINVAL);

        if (IS_ERR(group))
                return group;

        ret = iommu_group_add_device(group, dev);
        if (ret)
                goto out_put_group;

        return group;

out_put_group:
        iommu_group_put(group);

        return ERR_PTR(ret);
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int probe_iommu_group(struct device *dev, void *data)
{
        struct list_head *group_list = data;
        struct iommu_group *group;
        int ret;

        /* Device is probed already if in a group */
        group = iommu_group_get(dev);
        if (group) {
                iommu_group_put(group);
                return 0;
        }

        ret = __iommu_probe_device(dev, group_list);
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        iommu_release_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        unsigned long group_action = 0;
        struct device *dev = data;
        struct iommu_group *group;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                int ret;

                ret = iommu_probe_device(dev);
                return (ret) ? NOTIFY_DONE : NOTIFY_OK;
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                iommu_release_device(dev);
                return NOTIFY_OK;
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

struct __group_domain_type {
        struct device *dev;
        unsigned int type;
};

static int probe_get_default_domain_type(struct device *dev, void *data)
{
        struct __group_domain_type *gtype = data;
        unsigned int type = iommu_get_def_domain_type(dev);

        if (type) {
                if (gtype->type && gtype->type != type) {
                        dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
                                 iommu_domain_type_str(type),
                                 dev_name(gtype->dev),
                                 iommu_domain_type_str(gtype->type));
                        gtype->type = 0;
                }

                if (!gtype->dev) {
                        gtype->dev = dev;
                        gtype->type = type;
                }
        }

        return 0;
}

static void probe_alloc_default_domain(struct bus_type *bus,
                                       struct iommu_group *group)
{
        struct __group_domain_type gtype;

        memset(&gtype, 0, sizeof(gtype));

        /* Ask for default domain requirements of all devices in the group */
        __iommu_group_for_each_dev(group, &gtype,
                                   probe_get_default_domain_type);

        if (!gtype.type)
                gtype.type = iommu_def_domain_type;

        iommu_group_alloc_default_domain(bus, group, gtype.type);
}

static int iommu_group_do_dma_attach(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;
        int ret = 0;

        if (!iommu_is_attach_deferred(domain, dev))
                ret = __iommu_attach_device(domain, dev);

        return ret;
}

static int __iommu_group_dma_attach(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group->default_domain,
                                          iommu_group_do_dma_attach);
}

static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        if (domain->ops->probe_finalize)
                domain->ops->probe_finalize(dev);

        return 0;
}

static void __iommu_group_dma_finalize(struct iommu_group *group)
{
        __iommu_group_for_each_dev(group, group->default_domain,
                                   iommu_group_do_probe_finalize);
}

static int iommu_do_create_direct_mappings(struct device *dev, void *data)
{
        struct iommu_group *group = data;

        iommu_create_device_direct_mappings(group, dev);

        return 0;
}

static int iommu_group_create_direct_mappings(struct iommu_group *group)
{
        return __iommu_group_for_each_dev(group, group,
                                          iommu_do_create_direct_mappings);
}

int bus_iommu_probe(struct bus_type *bus)
{
        struct iommu_group *group, *next;
        LIST_HEAD(group_list);
        int ret;

        /*
         * This code-path does not allocate the default domain when
         * creating the iommu group, so do it after the groups are
         * created.
         */
        ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
        if (ret)
                return ret;

        list_for_each_entry_safe(group, next, &group_list, entry) {
                /* Remove item from the list */
                list_del_init(&group->entry);

                mutex_lock(&group->mutex);

                /* Try to allocate default domain */
                probe_alloc_default_domain(bus, group);

                if (!group->default_domain) {
                        mutex_unlock(&group->mutex);
                        continue;
                }

                iommu_group_create_direct_mappings(group);

                ret = __iommu_group_dma_attach(group);

                mutex_unlock(&group->mutex);

                if (ret)
                        break;

                __iommu_group_dma_finalize(group);
        }

        return ret;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        struct notifier_block *nb;
        int err;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_iommu_probe(bus);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

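/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */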
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (ops == NULL) {
                bus->iommu_ops = NULL;
                return 0;
        }

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
                return false;

        return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

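/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */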
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

        /* Temporarily avoid -EEXIST while drivers still get their own cookies */
        if (iommu_is_dma_domain(domain) && !domain->iova_cookie && iommu_get_dma_cookie(domain)) {
                iommu_domain_free(domain);
                domain = NULL;
        }
        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        iommu_put_dma_cookie(domain);
        domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (!ret)
                trace_attach_device_to_domain(dev);
        return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;

        /*
         * Lock the group to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
        const struct iommu_ops *ops = domain->ops;

        if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
                return __iommu_attach_device(domain, dev);

        return 0;
}

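/*
 * Check flags and other user provided data for valid combinations. We also
 * make sure no reserved fields or unused flags are set. This is to ensure
 * not breaking userspace in the future when these fields or flags are used.
 */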
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
        u32 mask;
        int i;

        if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
                return -EINVAL;

        mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
        if (info->cache & ~mask)
                return -EINVAL;

        if (info->granularity >= IOMMU_INV_GRANU_NR)
                return -EINVAL;

        switch (info->granularity) {
        case IOMMU_INV_GRANU_ADDR:
                if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
                        return -EINVAL;

                mask = IOMMU_INV_ADDR_FLAGS_PASID |
                        IOMMU_INV_ADDR_FLAGS_ARCHID |
                        IOMMU_INV_ADDR_FLAGS_LEAF;

                if (info->granu.addr_info.flags & ~mask)
                        return -EINVAL;
                break;
        case IOMMU_INV_GRANU_PASID:
                mask = IOMMU_INV_PASID_FLAGS_PASID |
                        IOMMU_INV_PASID_FLAGS_ARCHID;
                if (info->granu.pasid_info.flags & ~mask)
                        return -EINVAL;

                break;
        case IOMMU_INV_GRANU_DOMAIN:
                if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        /* Check reserved padding fields */
        for (i = 0; i < sizeof(info->padding); i++) {
                if (info->padding[i])
                        return -EINVAL;
        }

        return 0;
}

int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
                                void __user *uinfo)
{
        struct iommu_cache_invalidate_info inv_info = { 0 };
        u32 minsz;
        int ret;

        if (unlikely(!domain->ops->cache_invalidate))
                return -ENODEV;

        /*
         * No new spaces can be added before the variable sized union, the
         * minimum size is the offset to the union.
         */
        minsz = offsetof(struct iommu_cache_invalidate_info, granu);

        /* Copy minsz from user to get flags and argsz */
        if (copy_from_user(&inv_info, uinfo, minsz))
                return -EFAULT;

        /* Fields before the variable size union are mandatory */
        if (inv_info.argsz < minsz)
                return -EINVAL;

        /* PASID and address granu require additional info beyond minsz */
        if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
            inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
                return -EINVAL;

        if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
            inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
                return -EINVAL;

        /*
         * User might be using a newer UAPI header which has a larger data
         * size, we shall support the existing flags within the current
         * size. Copy the remaining user data _after_ minsz but not more
         * than the current kernel supported size.
         */
        if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
                           min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
                return -EFAULT;

        /* Now the argsz is validated, check the content */
        ret = iommu_check_cache_invl_data(&inv_info);
        if (ret)
                return ret;

        return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);

static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
        u64 mask;
        int i;

        if (data->version != IOMMU_GPASID_BIND_VERSION_1)
                return -EINVAL;

        /* Check the range of supported formats */
        if (data->format >= IOMMU_PASID_FORMAT_LAST)
                return -EINVAL;

        /* Check all flags */
        mask = IOMMU_SVA_GPASID_VAL;
        if (data->flags & ~mask)
                return -EINVAL;

        /* Check reserved padding fields */
        for (i = 0; i < sizeof(data->padding); i++) {
                if (data->padding[i])
                        return -EINVAL;
        }

        return 0;
}

static int iommu_sva_prepare_bind_data(void __user *udata,
                                       struct iommu_gpasid_bind_data *data)
{
        u32 minsz;

        /*
         * We need to validate the fields before the variable sized union,
         * the minimum size is the offset to the union.
         */
        minsz = offsetof(struct iommu_gpasid_bind_data, vendor);

        /* Copy minsz from user to get flags and argsz */
        if (copy_from_user(data, udata, minsz))
                return -EFAULT;

        /* Fields before the variable size union are mandatory */
        if (data->argsz < minsz)
                return -EINVAL;
        /*
         * User might be using a newer UAPI header, we shall let IOMMU vendor
         * driver decide on what size it needs. Since the guest PASID bind data
         * can be vendor specific, larger argsz could be the result of extension
         * for one vendor but it should not affect another vendor.
         * Copy the remaining user data _after_ minsz
         */
        if (copy_from_user((void *)data + minsz, udata + minsz,
                           min_t(u32, data->argsz, sizeof(*data)) - minsz))
                return -EFAULT;

        return iommu_check_bind_data(data);
}

int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
                               void __user *udata)
{
        struct iommu_gpasid_bind_data data = { 0 };
        int ret;

        if (unlikely(!domain->ops->sva_bind_gpasid))
                return -ENODEV;

        ret = iommu_sva_prepare_bind_data(udata, &data);
        if (ret)
                return ret;

        return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);

int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
                            ioasid_t pasid)
{
        if (unlikely(!domain->ops->sva_unbind_gpasid))
                return -ENODEV;

        return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);

int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
                                 void __user *udata)
{
        struct iommu_gpasid_bind_data data = { 0 };
        int ret;

        if (unlikely(!domain->ops->sva_bind_gpasid))
                return -ENODEV;

        ret = iommu_sva_prepare_bind_data(udata, &data);
        if (ret)
                return ret;

        return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        if (iommu_is_attach_deferred(domain, dev))
                return;

        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                WARN_ON(1);
                goto out_unlock;
        }

        __iommu_detach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return NULL;

        domain = group->domain;

        iommu_group_put(group);

        return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

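/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */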
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
        return dev->iommu_group->default_domain;
}

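/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */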
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
{
        int ret;

        if (group->default_domain && group->domain != group->default_domain)
                return -EBUSY;

        ret = __iommu_group_for_each_dev(group, domain,
                                         iommu_group_do_attach_device);
        if (ret == 0)
                group->domain = domain;

        return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_attach_group(domain, group);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        __iommu_detach_device(domain, dev);

        return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
{
        int ret;

        if (!group->default_domain) {
                __iommu_group_for_each_dev(group, domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
                return;
        }

        if (group->domain == group->default_domain)
                return;

        /* Detach by re-attaching to the default domain */
        ret = __iommu_group_for_each_dev(group, group->default_domain,
                                         iommu_group_do_attach_device);
        if (ret != 0)
                WARN_ON(1);
        else
                group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        mutex_lock(&group->mutex);
        __iommu_detach_group(domain, group);
        mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (domain->type == IOMMU_DOMAIN_IDENTITY)
                return iova;

        if (domain->type == IOMMU_DOMAIN_BLOCKED)
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t paddr, size_t size, size_t *count)
{
        unsigned int pgsize_idx, pgsize_idx_next;
        unsigned long pgsizes;
        size_t offset, pgsize, pgsize_next;
        unsigned long addr_merge = paddr | iova;

        /* Page sizes supported by the hardware and small enough for @size */
        pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);

        /* Constrain the page sizes further based on the maximum alignment */
        if (likely(addr_merge))
                pgsizes &= GENMASK(__ffs(addr_merge), 0);

        /* Make sure we have at least one suitable page size */
        BUG_ON(!pgsizes);

        /* Pick the biggest page size remaining */
        pgsize_idx = __fls(pgsizes);
        pgsize = BIT(pgsize_idx);
        if (!count)
                return pgsize;

        /* Find the next biggest supported page size, if it exists */
        pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
        if (!pgsizes)
                goto out_set_count;

        pgsize_idx_next = __ffs(pgsizes);
        pgsize_next = BIT(pgsize_idx_next);

        /*
         * There's no point trying a bigger page size unless the virtual
         * and physical addresses are similarly offset within the larger page.
         */
        if ((iova ^ paddr) & (pgsize_next - 1))
                goto out_set_count;

        /* Calculate the offset to the next page size alignment boundary */
        offset = pgsize_next - (addr_merge & (pgsize_next - 1));

        /*
         * If size is big enough to accommodate the larger page, reduce
         * the number of smaller pages.
         */
        if (offset + pgsize_next <= size)
                size = offset;

out_set_count:
        *count = size >> pgsize_idx;
        return pgsize;
}

static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                             phys_addr_t paddr, size_t size, int prot,
                             gfp_t gfp, size_t *mapped)
{
        const struct iommu_ops *ops = domain->ops;
        size_t pgsize, count;
        int ret;

        pgsize = iommu_pgsize(domain, iova, paddr, size, &count);

        pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
                 iova, &paddr, pgsize, count);

        if (ops->map_pages) {
                ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
                                     gfp, mapped);
        } else {
                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
                *mapped = ret ? 0 : pgsize;
        }

        return ret;
}

static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        const struct iommu_ops *ops = domain->ops;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(!(ops->map || ops->map_pages) ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t mapped = 0;

                ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
                                        &mapped);
                /*
                 * Some pages may have been mapped, even if an error occurred,
                 * so we should account for those so they can be unmapped.
                 */
                size -= mapped;

                if (ret)
                        break;

                iova += mapped;
                paddr += mapped;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}

2536static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2537 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2538{
2539 const struct iommu_ops *ops = domain->ops;
2540 int ret;
2541
2542 ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2543 if (ret == 0 && ops->iotlb_sync_map)
2544 ops->iotlb_sync_map(domain, iova, size);
2545
2546 return ret;
2547}
2548
2549int iommu_map(struct iommu_domain *domain, unsigned long iova,
2550 phys_addr_t paddr, size_t size, int prot)
2551{
2552 might_sleep();
2553 return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2554}
2555EXPORT_SYMBOL_GPL(iommu_map);
2556
2557int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2558 phys_addr_t paddr, size_t size, int prot)
2559{
2560 return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2561}
2562EXPORT_SYMBOL_GPL(iommu_map_atomic);

static size_t __iommu_unmap_pages(struct iommu_domain *domain,
				  unsigned long iova, size_t size,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t pgsize, count;

	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
	return ops->unmap_pages ?
	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
	       ops->unmap(domain, iova, pgsize, iotlb_gather);
}

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __iommu_unmap_pages(domain, iova,
						    size - unmapped,
						    iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
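
/*
 * Example (sketch, hypothetical IOVAs): iommu_unmap_fast() lets a caller
 * batch several unmaps into a single TLB invalidation, which is what
 * iommu_unmap() does internally for one range:
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, 0x100000, SZ_4K, &gather);
 *	iommu_unmap_fast(domain, 0x200000, SZ_4K, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */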

static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents, int prot,
			      gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	/*
	 * Walk the scatterlist one extra time (i == nents) so that the
	 * final physically-contiguous run is flushed through __iommu_map().
	 */
	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = __iommu_map(domain, iova + mapped, start,
					  len, prot, gfp);

			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);
	return mapped;

out_err:
	/* undo mappings already done */
	iommu_unmap(domain, iova, mapped);

	return ret;
}

ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		     struct scatterlist *sg, unsigned int nents, int prot)
{
	might_sleep();
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg);
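
/*
 * Example (sketch; page0/page1 and the IOVA are hypothetical): mapping a
 * two-entry scatterlist in one call. Physically contiguous entries are
 * merged into larger mappings where the hardware allows it.
 *
 *	struct scatterlist sgl[2];
 *	ssize_t mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_page(&sgl[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], page1, PAGE_SIZE, 0);
 *	mapped = iommu_map_sg(domain, 0x100000, sgl, 2, IOMMU_READ);
 *	if (mapped < 0)
 *		return mapped;
 */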

ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents, int prot)
{
	return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);

/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow the IOMMU drivers to serve
 * the faults.
 *
 * This function returns 0 on success, and an appropriate error code
 * otherwise.
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
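
/*
 * Example (sketch; the handler name is hypothetical): a domain owner can
 * install the fault handler that report_iommu_fault() invokes via
 * iommu_set_fault_handler():
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "iommu fault at 0x%lx\n", iova);
 *		return -ENOSYS;	// not handled, let the driver report it
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */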

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_enable_nesting(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->enable_nesting)
		return -EINVAL;
	return domain->ops->enable_nesting(domain);
}
EXPORT_SYMBOL_GPL(iommu_enable_nesting);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirk)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;
	if (!domain->ops->set_pgtable_quirks)
		return -EINVAL;
	return domain->ops->set_pgtable_quirks(domain, quirk);
}
EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
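
/*
 * Example (sketch): both helpers above only apply to an unmanaged domain;
 * a VFIO-style user might request nesting and a driver-specific page-table
 * quirk right after allocation. The quirk shown is the io-pgtable
 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA flag, which only drivers that honour it
 * will accept.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (domain) {
 *		iommu_enable_nesting(domain);
 *		iommu_set_pgtable_quirks(domain,
 *					 IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
 *	}
 */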

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
 * @dev: device for which to free reserved regions
 * @list: reserved region list for device
 *
 * IOMMU drivers can use this to implement their .put_resv_regions() callback
 * as long as they are only required to free all the allocated device
 * reserved regions, typically allocated during iommu_alloc_resv_region()
 * calls.
 */
void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	struct iommu_resv_region *entry, *next;

	list_for_each_entry_safe(entry, next, list, list)
		kfree(entry);
}
EXPORT_SYMBOL(generic_iommu_put_resv_regions);

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}
EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
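
/*
 * Example (sketch; the callback name and doorbell address are
 * hypothetical): an IOMMU driver's ->get_resv_regions() implementation
 * typically allocates regions with the helper above and links them into
 * the caller's list:
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x8000000, SZ_1M,
 *						 IOMMU_WRITE, IOMMU_RESV_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */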

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	if (!dev_iommu_get(dev))
		return -ENOMEM;

	/* Preallocate for the overwhelmingly common case of 1 ID */
	fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i, new_num;

	if (!fwspec)
		return -EINVAL;

	new_num = fwspec->num_ids + num_ids;
	if (new_num > 1) {
		fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
				  GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids = new_num;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
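
/*
 * Example (sketch; the fwnode, ops pointer and stream ID are hypothetical):
 * a firmware parsing path (OF or ACPI) typically creates the fwspec for a
 * master device and then records the IDs it found:
 *
 *	u32 sid = 0x42;
 *	int ret;
 *
 *	ret = iommu_fwspec_init(dev, iommu_fwnode, smmu_ops);
 *	if (!ret)
 *		ret = iommu_fwspec_add_ids(dev, &sid, 1);
 */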

/*
 * Per device IOMMU features.
 */
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_enable_feat)
			return ops->dev_enable_feat(dev, feat);
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_disable_feat)
			return ops->dev_disable_feat(dev, feat);
	}

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	if (dev->iommu && dev->iommu->iommu_dev) {
		const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;

		if (ops->dev_feat_enabled)
			return ops->dev_feat_enabled(dev, feat);
	}

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
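
/*
 * Example (sketch): a driver that wants Shared Virtual Addressing would
 * check and enable the per-device feature before binding an mm:
 *
 *	if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_SVA) &&
 *	    iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA))
 *		return -ENODEV;
 */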

/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail anyway).
 */
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
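
/*
 * Example (sketch): attaching an unmanaged domain as an aux domain and
 * retrieving the PASID that tags its traffic, assuming IOMMU_DEV_FEAT_AUX
 * was enabled beforehand:
 *
 *	int pasid;
 *
 *	if (!iommu_aux_attach_device(domain, dev)) {
 *		pasid = iommu_aux_get_pasid(domain, dev);
 *		...
 *		iommu_aux_detach_device(domain, dev);
 *	}
 */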

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to bind callback
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID. If a bond already exists between
 * @dev and @mm, it is returned and an additional reference is taken. Caller
 * must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
 * initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU groups
	 * with more than one device. Existing SVA-capable systems are not
	 * affected by the problems that required IOMMU groups (lack of ACS
	 * isolation, device ID aliasing and other hardware issues).
	 */
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space. The device should
 * not be issuing any more transaction for this PASID. All outstanding page
 * requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
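
/*
 * Example (sketch): a full SVA flow binds the current process's address
 * space, learns the PASID to program into the device, and later drops the
 * bond:
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */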

/*
 * Changes the default domain of an iommu group that has *only* one device
 *
 * @group: The group for which the default domain should be changed
 * @prev_dev: The device in the group (this is used to make sure that the
 *	      device hasn't changed after the caller has called this function)
 * @type: The type of the new default domain that gets associated with the
 *	  group
 *
 * Returns 0 on success and error code on failure
 *
 * Note: Presently, this function is called only when a user requests to
 * change the group's default domain type through
 * /sys/kernel/iommu_groups/<grp_id>/type, so the caller is known to hold
 * the required privileges. Take a closer look before reusing it elsewhere.
 */
static int iommu_change_dev_def_domain(struct iommu_group *group,
				       struct device *prev_dev, int type)
{
	struct iommu_domain *prev_dom;
	struct group_device *grp_dev;
	int ret, dev_def_dom;
	struct device *dev;

	mutex_lock(&group->mutex);

	if (group->default_domain != group->domain) {
		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * The group wasn't locked while the device lock was acquired in
	 * iommu_group_store_type(), so make sure the device count hasn't
	 * changed in the meantime.
	 *
	 * Changing the default domain of a group with two or more devices
	 * isn't supported: locking every device in the group here could
	 * deadlock against another thread taking the same locks in a
	 * different order.
	 */
	if (iommu_group_device_count(group) != 1) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
		ret = -EINVAL;
		goto out;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;

	if (prev_dev != dev) {
		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
		ret = -EBUSY;
		goto out;
	}

	prev_dom = group->default_domain;
	if (!prev_dom) {
		ret = -EINVAL;
		goto out;
	}

	dev_def_dom = iommu_get_def_domain_type(dev);
	if (!type) {
		/*
		 * If the user hasn't requested any specific type of domain and
		 * if the device supports both the domains, then default to the
		 * domain the device was booted with
		 */
		type = dev_def_dom ? : iommu_def_domain_type;
	} else if (dev_def_dom && type != dev_def_dom) {
		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
				    iommu_domain_type_str(type));
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Switch to a new domain only if the requested domain type is different
	 * from the existing default domain type
	 */
	if (prev_dom->type == type) {
		ret = 0;
		goto out;
	}

	/* We can bring up a flush queue without tearing down the domain */
	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(prev_dom);
		if (!ret)
			prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
		goto out;
	}

	/* Sets group->default_domain to the newly allocated domain */
	ret = iommu_group_alloc_default_domain(dev->bus, group, type);
	if (ret)
		goto out;

	ret = iommu_create_device_direct_mappings(group, dev);
	if (ret)
		goto free_new_domain;

	ret = __iommu_attach_device(group->default_domain, dev);
	if (ret)
		goto free_new_domain;

	group->domain = group->default_domain;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which
	 * in-turn might call back into IOMMU core code, where it tries to take
	 * group->mutex, resulting in a deadlock.
	 */
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriately set */
	iommu_group_do_probe_finalize(dev, group->default_domain);
	iommu_domain_free(prev_dom);
	return 0;

free_new_domain:
	iommu_domain_free(group->default_domain);
	group->default_domain = prev_dom;
	group->domain = prev_dom;

out:
	mutex_unlock(&group->mutex);

	return ret;
}

/*
 * Changing the default domain through sysfs (e.g.
 * echo DMA-FQ > /sys/kernel/iommu_groups/<grp_id>/type) requires the user
 * to unbind the drivers from the devices in the iommu group, except for a
 * DMA -> DMA-FQ transition. Return failure if this isn't met.
 *
 * We need to consider the race between this and the device release path.
 * device_lock(dev) is used here to guarantee that the device release path
 * will not be entered at the same time.
 */
static ssize_t iommu_group_store_type(struct iommu_group *group,
				      const char *buf, size_t count)
{
	struct group_device *grp_dev;
	struct device *dev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	if (WARN_ON(!group))
		return -EINVAL;

	if (sysfs_streq(buf, "identity"))
		req_type = IOMMU_DOMAIN_IDENTITY;
	else if (sysfs_streq(buf, "DMA"))
		req_type = IOMMU_DOMAIN_DMA;
	else if (sysfs_streq(buf, "DMA-FQ"))
		req_type = IOMMU_DOMAIN_DMA_FQ;
	else if (sysfs_streq(buf, "auto"))
		req_type = 0;
	else
		return -EINVAL;

	/*
	 * Lock/Unlock the group mutex here before device lock to
	 * 1. Make sure that the iommu group has only one device (this is a
	 *    prerequisite for step 2)
	 * 2. Get struct *dev which is needed to lock device
	 */
	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		mutex_unlock(&group->mutex);
		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
		return -EINVAL;
	}

	/* Since group has only one device */
	grp_dev = list_first_entry(&group->devices, struct group_device, list);
	dev = grp_dev->dev;
	get_device(dev);

	/*
	 * Don't hold the group mutex while taking the device lock: doing so
	 * could deadlock. Assume two threads, T1 changing the default domain
	 * of this group and T2 hot-unplugging (or releasing [1] a VF of) a
	 * device in the same group. If T1 takes the group mutex while T2
	 * holds the device lock, each ends up waiting for the other. Hence
	 * the lock order used here:
	 *
	 * device_lock(dev);
	 *	mutex_lock(&group->mutex);
	 *		iommu_change_dev_def_domain();
	 *	mutex_unlock(&group->mutex);
	 * device_unlock(dev);
	 *
	 * [1] Typical device release path
	 * device_lock() from device/driver core code
	 *  -> bus_notifier()
	 *   -> iommu_bus_notifier()
	 *    -> iommu_release_device()
	 *     -> ops->release_device() vendor driver calls back iommu core code
	 *      -> mutex_lock() from iommu core code
	 */
	mutex_unlock(&group->mutex);

	/* Check if the device in the group still has a driver bound to it */
	device_lock(dev);
	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA)) {
		pr_err_ratelimited("Device is still bound to driver\n");
		ret = -EBUSY;
		goto out;
	}

	ret = iommu_change_dev_def_domain(group, dev, req_type);
	ret = ret ?: count;

out:
	device_unlock(dev);
	put_device(dev);

	return ret;
}