// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>
26
27static struct kset *iommu_group_kset;
28static DEFINE_IDA(iommu_group_ida);
29#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
30static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
31#else
32static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
33#endif
34static bool iommu_dma_strict __read_mostly = true;
35
36struct iommu_group {
37 struct kobject kobj;
38 struct kobject *devices_kobj;
39 struct list_head devices;
40 struct mutex mutex;
41 struct blocking_notifier_head notifier;
42 void *iommu_data;
43 void (*iommu_data_release)(void *iommu_data);
44 char *name;
45 int id;
46 struct iommu_domain *default_domain;
47 struct iommu_domain *domain;
48};
49
50struct group_device {
51 struct list_head list;
52 struct device *dev;
53 char *name;
54};
55
56struct iommu_group_attribute {
57 struct attribute attr;
58 ssize_t (*show)(struct iommu_group *group, char *buf);
59 ssize_t (*store)(struct iommu_group *group,
60 const char *buf, size_t count);
61};
62
63static const char * const iommu_group_resv_type_string[] = {
64 [IOMMU_RESV_DIRECT] = "direct",
65 [IOMMU_RESV_DIRECT_RELAXABLE] = "direct-relaxable",
66 [IOMMU_RESV_RESERVED] = "reserved",
67 [IOMMU_RESV_MSI] = "msi",
68 [IOMMU_RESV_SW_MSI] = "msi",
69};
70
71#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
72struct iommu_group_attribute iommu_group_attr_##_name = \
73 __ATTR(_name, _mode, _show, _store)
74
75#define to_iommu_group_attr(_attr) \
76 container_of(_attr, struct iommu_group_attribute, attr)
77#define to_iommu_group(_kobj) \
78 container_of(_kobj, struct iommu_group, kobj)
79
80static LIST_HEAD(iommu_device_list);
81static DEFINE_SPINLOCK(iommu_device_lock);
82
83int iommu_device_register(struct iommu_device *iommu)
84{
85 spin_lock(&iommu_device_lock);
86 list_add_tail(&iommu->list, &iommu_device_list);
87 spin_unlock(&iommu_device_lock);
88
89 return 0;
90}
91
92void iommu_device_unregister(struct iommu_device *iommu)
93{
94 spin_lock(&iommu_device_lock);
95 list_del(&iommu->list);
96 spin_unlock(&iommu_device_lock);
97}
98
99static struct iommu_param *iommu_get_dev_param(struct device *dev)
100{
101 struct iommu_param *param = dev->iommu_param;
102
103 if (param)
104 return param;
105
106 param = kzalloc(sizeof(*param), GFP_KERNEL);
107 if (!param)
108 return NULL;
109
	mutex_init(&param->lock);
111 dev->iommu_param = param;
112 return param;
113}
114
115static void iommu_free_dev_param(struct device *dev)
116{
117 kfree(dev->iommu_param);
118 dev->iommu_param = NULL;
119}
120
121int iommu_probe_device(struct device *dev)
122{
123 const struct iommu_ops *ops = dev->bus->iommu_ops;
124 int ret;
125
126 WARN_ON(dev->iommu_group);
127 if (!ops)
128 return -EINVAL;
129
130 if (!iommu_get_dev_param(dev))
131 return -ENOMEM;
132
133 ret = ops->add_device(dev);
134 if (ret)
135 iommu_free_dev_param(dev);
136
137 return ret;
138}
139
140void iommu_release_device(struct device *dev)
141{
142 const struct iommu_ops *ops = dev->bus->iommu_ops;
143
144 if (dev->iommu_group)
145 ops->remove_device(dev);
146
147 iommu_free_dev_param(dev);
148}
149
150static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
151 unsigned type);
152static int __iommu_attach_device(struct iommu_domain *domain,
153 struct device *dev);
154static int __iommu_attach_group(struct iommu_domain *domain,
155 struct iommu_group *group);
156static void __iommu_detach_group(struct iommu_domain *domain,
157 struct iommu_group *group);
158
159static int __init iommu_set_def_domain_type(char *str)
160{
161 bool pt;
162 int ret;
163
164 ret = kstrtobool(str, &pt);
165 if (ret)
166 return ret;
167
168 iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
169 return 0;
170}
171early_param("iommu.passthrough", iommu_set_def_domain_type);
172
173static int __init iommu_dma_setup(char *str)
174{
175 return kstrtobool(str, &iommu_dma_strict);
176}
177early_param("iommu.strict", iommu_dma_setup);
178
179static ssize_t iommu_group_attr_show(struct kobject *kobj,
180 struct attribute *__attr, char *buf)
181{
182 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
183 struct iommu_group *group = to_iommu_group(kobj);
184 ssize_t ret = -EIO;
185
186 if (attr->show)
187 ret = attr->show(group, buf);
188 return ret;
189}
190
191static ssize_t iommu_group_attr_store(struct kobject *kobj,
192 struct attribute *__attr,
193 const char *buf, size_t count)
194{
195 struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
196 struct iommu_group *group = to_iommu_group(kobj);
197 ssize_t ret = -EIO;
198
199 if (attr->store)
200 ret = attr->store(group, buf, count);
201 return ret;
202}
203
204static const struct sysfs_ops iommu_group_sysfs_ops = {
205 .show = iommu_group_attr_show,
206 .store = iommu_group_attr_store,
207};
208
209static int iommu_group_create_file(struct iommu_group *group,
210 struct iommu_group_attribute *attr)
211{
212 return sysfs_create_file(&group->kobj, &attr->attr);
213}
214
215static void iommu_group_remove_file(struct iommu_group *group,
216 struct iommu_group_attribute *attr)
217{
218 sysfs_remove_file(&group->kobj, &attr->attr);
219}
220
221static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
222{
223 return sprintf(buf, "%s\n", group->name);
224}
225
/*
 * Insert a new reserved region into a sorted list of regions.
 *
 * The new element is sorted by address with respect to the other regions of
 * the same type.  If it overlaps an existing region of the same type, the
 * two are merged; a region overlapping one of a different type is kept as a
 * separate entry.
 */
238static int iommu_insert_resv_region(struct iommu_resv_region *new,
239 struct list_head *regions)
240{
241 struct iommu_resv_region *region;
242 phys_addr_t start = new->start;
243 phys_addr_t end = new->start + new->length - 1;
244 struct list_head *pos = regions->next;
245
246 while (pos != regions) {
247 struct iommu_resv_region *entry =
248 list_entry(pos, struct iommu_resv_region, list);
249 phys_addr_t a = entry->start;
250 phys_addr_t b = entry->start + entry->length - 1;
251 int type = entry->type;
252
253 if (end < a) {
254 goto insert;
255 } else if (start > b) {
256 pos = pos->next;
257 } else if ((start >= a) && (end <= b)) {
258 if (new->type == type)
259 return 0;
260 else
261 pos = pos->next;
262 } else {
263 if (new->type == type) {
264 phys_addr_t new_start = min(a, start);
265 phys_addr_t new_end = max(b, end);
266 int ret;
267
268 list_del(&entry->list);
269 entry->start = new_start;
270 entry->length = new_end - new_start + 1;
271 ret = iommu_insert_resv_region(entry, regions);
272 kfree(entry);
273 return ret;
274 } else {
275 pos = pos->next;
276 }
277 }
278 }
279insert:
280 region = iommu_alloc_resv_region(new->start, new->length,
281 new->prot, new->type);
282 if (!region)
283 return -ENOMEM;
284
	list_add_tail(&region->list, pos);
286 return 0;
287}
288
289static int
290iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
291 struct list_head *group_resv_regions)
292{
293 struct iommu_resv_region *entry;
294 int ret = 0;
295
296 list_for_each_entry(entry, dev_resv_regions, list) {
297 ret = iommu_insert_resv_region(entry, group_resv_regions);
298 if (ret)
299 break;
300 }
301 return ret;
302}
303
304int iommu_get_group_resv_regions(struct iommu_group *group,
305 struct list_head *head)
306{
307 struct group_device *device;
308 int ret = 0;
309
310 mutex_lock(&group->mutex);
311 list_for_each_entry(device, &group->devices, list) {
312 struct list_head dev_resv_regions;
313
314 INIT_LIST_HEAD(&dev_resv_regions);
315 iommu_get_resv_regions(device->dev, &dev_resv_regions);
316 ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
317 iommu_put_resv_regions(device->dev, &dev_resv_regions);
318 if (ret)
319 break;
320 }
321 mutex_unlock(&group->mutex);
322 return ret;
323}
324EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
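/*
 * A minimal usage sketch (illustrative only, not part of this file's API):
 * callers collect the regions into a local list and must free the entries
 * themselves, as iommu_group_show_resv_regions() below does:
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv_regions);
 *	list_for_each_entry_safe(region, next, &resv_regions, list) {
 *		// use region->start, region->length, region->type
 *		kfree(region);
 *	}
 */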
325
326static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
327 char *buf)
328{
329 struct iommu_resv_region *region, *next;
330 struct list_head group_resv_regions;
331 char *str = buf;
332
333 INIT_LIST_HEAD(&group_resv_regions);
334 iommu_get_group_resv_regions(group, &group_resv_regions);
335
336 list_for_each_entry_safe(region, next, &group_resv_regions, list) {
337 str += sprintf(str, "0x%016llx 0x%016llx %s\n",
338 (long long int)region->start,
339 (long long int)(region->start +
340 region->length - 1),
341 iommu_group_resv_type_string[region->type]);
342 kfree(region);
343 }
344
345 return (str - buf);
346}
347
348static ssize_t iommu_group_show_type(struct iommu_group *group,
349 char *buf)
350{
351 char *type = "unknown\n";
352
353 if (group->default_domain) {
354 switch (group->default_domain->type) {
355 case IOMMU_DOMAIN_BLOCKED:
356 type = "blocked\n";
357 break;
358 case IOMMU_DOMAIN_IDENTITY:
359 type = "identity\n";
360 break;
361 case IOMMU_DOMAIN_UNMANAGED:
362 type = "unmanaged\n";
363 break;
364 case IOMMU_DOMAIN_DMA:
365 type = "DMA\n";
366 break;
367 }
368 }
369 strcpy(buf, type);
370
371 return strlen(type);
372}
373
374static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
375
376static IOMMU_GROUP_ATTR(reserved_regions, 0444,
377 iommu_group_show_resv_regions, NULL);
378
379static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);
380
381static void iommu_group_release(struct kobject *kobj)
382{
383 struct iommu_group *group = to_iommu_group(kobj);
384
385 pr_debug("Releasing group %d\n", group->id);
386
387 if (group->iommu_data_release)
388 group->iommu_data_release(group->iommu_data);
389
390 ida_simple_remove(&iommu_group_ida, group->id);
391
392 if (group->default_domain)
393 iommu_domain_free(group->default_domain);
394
395 kfree(group->name);
396 kfree(group);
397}
398
399static struct kobj_type iommu_group_ktype = {
400 .sysfs_ops = &iommu_group_sysfs_ops,
401 .release = iommu_group_release,
402};
403
/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
415struct iommu_group *iommu_group_alloc(void)
416{
417 struct iommu_group *group;
418 int ret;
419
420 group = kzalloc(sizeof(*group), GFP_KERNEL);
421 if (!group)
422 return ERR_PTR(-ENOMEM);
423
424 group->kobj.kset = iommu_group_kset;
425 mutex_init(&group->mutex);
426 INIT_LIST_HEAD(&group->devices);
427 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
428
429 ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
430 if (ret < 0) {
431 kfree(group);
432 return ERR_PTR(ret);
433 }
434 group->id = ret;
435
436 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
437 NULL, "%d", group->id);
438 if (ret) {
439 ida_simple_remove(&iommu_group_ida, group->id);
440 kfree(group);
441 return ERR_PTR(ret);
442 }
443
444 group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
445 if (!group->devices_kobj) {
446 kobject_put(&group->kobj);
447 return ERR_PTR(-ENOMEM);
448 }
449
	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);
456
457 ret = iommu_group_create_file(group,
458 &iommu_group_attr_reserved_regions);
459 if (ret)
460 return ERR_PTR(ret);
461
462 ret = iommu_group_create_file(group, &iommu_group_attr_type);
463 if (ret)
464 return ERR_PTR(ret);
465
466 pr_debug("Allocated group %d\n", group->id);
467
468 return group;
469}
470EXPORT_SYMBOL_GPL(iommu_group_alloc);
471
472struct iommu_group *iommu_group_get_by_id(int id)
473{
474 struct kobject *group_kobj;
475 struct iommu_group *group;
476 const char *name;
477
478 if (!iommu_group_kset)
479 return NULL;
480
481 name = kasprintf(GFP_KERNEL, "%d", id);
482 if (!name)
483 return NULL;
484
485 group_kobj = kset_find_obj(iommu_group_kset, name);
486 kfree(name);
487
488 if (!group_kobj)
489 return NULL;
490
491 group = container_of(group_kobj, struct iommu_group, kobj);
492 BUG_ON(group->id != id);
493
494 kobject_get(group->devices_kobj);
495 kobject_put(&group->kobj);
496
497 return group;
498}
499EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
500
/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
509void *iommu_group_get_iommudata(struct iommu_group *group)
510{
511 return group->iommu_data;
512}
513EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
514
/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
525void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
526 void (*release)(void *iommu_data))
527{
528 group->iommu_data = iommu_data;
529 group->iommu_data_release = release;
530}
531EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
532
/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * show up in the "name" attribute file under the group.
 */
541int iommu_group_set_name(struct iommu_group *group, const char *name)
542{
543 int ret;
544
545 if (group->name) {
546 iommu_group_remove_file(group, &iommu_group_attr_name);
547 kfree(group->name);
548 group->name = NULL;
549 if (!name)
550 return 0;
551 }
552
553 group->name = kstrdup(name, GFP_KERNEL);
554 if (!group->name)
555 return -ENOMEM;
556
557 ret = iommu_group_create_file(group, &iommu_group_attr_name);
558 if (ret) {
559 kfree(group->name);
560 group->name = NULL;
561 return ret;
562 }
563
564 return 0;
565}
566EXPORT_SYMBOL_GPL(iommu_group_set_name);
567
568static int iommu_group_create_direct_mappings(struct iommu_group *group,
569 struct device *dev)
570{
571 struct iommu_domain *domain = group->default_domain;
572 struct iommu_resv_region *entry;
573 struct list_head mappings;
574 unsigned long pg_size;
575 int ret = 0;
576
577 if (!domain || domain->type != IOMMU_DOMAIN_DMA)
578 return 0;
579
580 BUG_ON(!domain->pgsize_bitmap);
581
582 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
583 INIT_LIST_HEAD(&mappings);
584
585 iommu_get_resv_regions(dev, &mappings);
	/* We need to consider overlapping regions for different devices */
588 list_for_each_entry(entry, &mappings, list) {
589 dma_addr_t start, end, addr;
590
591 if (domain->ops->apply_resv_region)
592 domain->ops->apply_resv_region(dev, domain, entry);
593
594 start = ALIGN(entry->start, pg_size);
595 end = ALIGN(entry->start + entry->length, pg_size);
596
597 if (entry->type != IOMMU_RESV_DIRECT &&
598 entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
599 continue;
600
601 for (addr = start; addr < end; addr += pg_size) {
602 phys_addr_t phys_addr;
603
604 phys_addr = iommu_iova_to_phys(domain, addr);
605 if (phys_addr)
606 continue;
607
608 ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
609 if (ret)
610 goto out;
611 }
612
613 }
614
615 iommu_flush_tlb_all(domain);
616
617out:
618 iommu_put_resv_regions(dev, &mappings);
619
620 return ret;
621}
622
/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
631int iommu_group_add_device(struct iommu_group *group, struct device *dev)
632{
633 int ret, i = 0;
634 struct group_device *device;
635
636 device = kzalloc(sizeof(*device), GFP_KERNEL);
637 if (!device)
638 return -ENOMEM;
639
640 device->dev = dev;
641
642 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
643 if (ret)
644 goto err_free_device;
645
646 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
647rename:
648 if (!device->name) {
649 ret = -ENOMEM;
650 goto err_remove_link;
651 }
652
653 ret = sysfs_create_link_nowarn(group->devices_kobj,
654 &dev->kobj, device->name);
655 if (ret) {
		if (ret == -EEXIST && i >= 0) {
			/*
			 * Account for the slim chance of collision
			 * and repeat the name with a counter appended
			 * until we find one that's available.
			 */
661 kfree(device->name);
662 device->name = kasprintf(GFP_KERNEL, "%s.%d",
663 kobject_name(&dev->kobj), i++);
664 goto rename;
665 }
666 goto err_free_name;
667 }
668
669 kobject_get(group->devices_kobj);
670
671 dev->iommu_group = group;
672
673 iommu_group_create_direct_mappings(group, dev);
674
675 mutex_lock(&group->mutex);
676 list_add_tail(&device->list, &group->devices);
677 if (group->domain)
678 ret = __iommu_attach_device(group->domain, dev);
679 mutex_unlock(&group->mutex);
680 if (ret)
681 goto err_put_group;
682
	/* Notify any listeners about change to group. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
686
687 trace_add_device_to_group(group->id, dev);
688
689 dev_info(dev, "Adding to iommu group %d\n", group->id);
690
691 return 0;
692
693err_put_group:
694 mutex_lock(&group->mutex);
695 list_del(&device->list);
696 mutex_unlock(&group->mutex);
697 dev->iommu_group = NULL;
698 kobject_put(group->devices_kobj);
699err_free_name:
700 kfree(device->name);
701err_remove_link:
702 sysfs_remove_link(&dev->kobj, "iommu_group");
703err_free_device:
704 kfree(device);
705 dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
706 return ret;
707}
708EXPORT_SYMBOL_GPL(iommu_group_add_device);
709
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
717void iommu_group_remove_device(struct device *dev)
718{
719 struct iommu_group *group = dev->iommu_group;
720 struct group_device *tmp_device, *device = NULL;
721
722 dev_info(dev, "Removing from iommu group %d\n", group->id);
	/* Pre-notify listeners that a device is being removed. */
	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
727
728 mutex_lock(&group->mutex);
729 list_for_each_entry(tmp_device, &group->devices, list) {
730 if (tmp_device->dev == dev) {
731 device = tmp_device;
732 list_del(&device->list);
733 break;
734 }
735 }
736 mutex_unlock(&group->mutex);
737
738 if (!device)
739 return;
740
741 sysfs_remove_link(group->devices_kobj, device->name);
742 sysfs_remove_link(&dev->kobj, "iommu_group");
743
744 trace_remove_device_from_group(group->id, dev);
745
746 kfree(device->name);
747 kfree(device);
748 dev->iommu_group = NULL;
749 kobject_put(group->devices_kobj);
750}
751EXPORT_SYMBOL_GPL(iommu_group_remove_device);
752
753static int iommu_group_device_count(struct iommu_group *group)
754{
755 struct group_device *entry;
756 int ret = 0;
757
758 list_for_each_entry(entry, &group->devices, list)
759 ret++;
760
761 return ret;
762}
763
/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
775static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
776 int (*fn)(struct device *, void *))
777{
778 struct group_device *device;
779 int ret = 0;
780
781 list_for_each_entry(device, &group->devices, list) {
782 ret = fn(device->dev, data);
783 if (ret)
784 break;
785 }
786 return ret;
787}
788
789
790int iommu_group_for_each_dev(struct iommu_group *group, void *data,
791 int (*fn)(struct device *, void *))
792{
793 int ret;
794
795 mutex_lock(&group->mutex);
796 ret = __iommu_group_for_each_dev(group, data, fn);
797 mutex_unlock(&group->mutex);
798
799 return ret;
800}
801EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
802
/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
811struct iommu_group *iommu_group_get(struct device *dev)
812{
813 struct iommu_group *group = dev->iommu_group;
814
815 if (group)
816 kobject_get(group->devices_kobj);
817
818 return group;
819}
820EXPORT_SYMBOL_GPL(iommu_group_get);
821
/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references
 * on an existing group.  Returns the group provided.
 */
829struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
830{
831 kobject_get(group->devices_kobj);
832 return group;
833}
834
/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
842void iommu_group_put(struct iommu_group *group)
843{
844 if (group)
845 kobject_put(group->devices_kobj);
846}
847EXPORT_SYMBOL_GPL(iommu_group_put);
848
/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
858int iommu_group_register_notifier(struct iommu_group *group,
859 struct notifier_block *nb)
860{
861 return blocking_notifier_chain_register(&group->notifier, nb);
862}
863EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
864
/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
872int iommu_group_unregister_notifier(struct iommu_group *group,
873 struct notifier_block *nb)
874{
875 return blocking_notifier_chain_unregister(&group->notifier, nb);
876}
877EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
878
/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument. The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should
 * also complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting faults
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct iommu_param *param = dev->iommu_param;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);
	/* Only allow one fault handler registered for each device */
	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
931EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
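/*
 * A minimal usage sketch (illustrative only): a consumer driver installs a
 * handler and receives subsequent fault events through it.  The names
 * mydrv_fault_handler and mydrv are hypothetical.
 *
 *	static int mydrv_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct mydrv *drv = data;
 *
 *		dev_dbg(drv->dev, "fault type %u\n", fault->type);
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(drv->dev,
 *						  mydrv_fault_handler, drv);
 */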
932
/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault
 *                                           handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct iommu_param *param = dev->iommu_param;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	/* we cannot unregister handler if there are pending faults */
	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
969EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
970
/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded
 * IRQ handler. When this function fails and the fault is recoverable, it is
 * the caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
982int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
983{
984 struct iommu_param *param = dev->iommu_param;
985 struct iommu_fault_event *evt_pending = NULL;
986 struct iommu_fault_param *fparam;
987 int ret = 0;
988
989 if (!param || !evt)
990 return -EINVAL;
991
	/* we only report device fault if there is a handler registered */
	mutex_lock(&param->lock);
994 fparam = param->fault_param;
995 if (!fparam || !fparam->handler) {
996 ret = -EINVAL;
997 goto done_unlock;
998 }
999
1000 if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1001 (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1002 evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1003 GFP_KERNEL);
1004 if (!evt_pending) {
1005 ret = -ENOMEM;
1006 goto done_unlock;
1007 }
1008 mutex_lock(&fparam->lock);
1009 list_add_tail(&evt_pending->list, &fparam->faults);
1010 mutex_unlock(&fparam->lock);
1011 }
1012
1013 ret = fparam->handler(&evt->fault, fparam->data);
1014 if (ret && evt_pending) {
1015 mutex_lock(&fparam->lock);
1016 list_del(&evt_pending->list);
1017 mutex_unlock(&fparam->lock);
1018 kfree(evt_pending);
1019 }
1020done_unlock:
	mutex_unlock(&param->lock);
1022 return ret;
1023}
1024EXPORT_SYMBOL_GPL(iommu_report_device_fault);
1025
1026int iommu_page_response(struct device *dev,
1027 struct iommu_page_response *msg)
1028{
1029 bool pasid_valid;
1030 int ret = -EINVAL;
1031 struct iommu_fault_event *evt;
1032 struct iommu_fault_page_request *prm;
1033 struct iommu_param *param = dev->iommu_param;
1034 struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1035
1036 if (!domain || !domain->ops->page_response)
1037 return -ENODEV;
1038
1039 if (!param || !param->fault_param)
1040 return -EINVAL;
1041
1042 if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1043 msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1044 return -EINVAL;
1045
	/* Only send response if there is a fault report pending */
	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
1049 dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1050 goto done_unlock;
1051 }
1052
	/*
	 * Check if we have a matching page request pending to respond,
	 * otherwise return -EINVAL
	 */
	list_for_each_entry(evt, &param->fault_param->faults, list) {
1057 prm = &evt->fault.prm;
1058 pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
1059
1060 if ((pasid_valid && prm->pasid != msg->pasid) ||
1061 prm->grpid != msg->grpid)
1062 continue;
1063
		/* Sanitize the reply */
		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;
1066
1067 ret = domain->ops->page_response(dev, evt, msg);
1068 list_del(&evt->list);
1069 kfree(evt);
1070 break;
1071 }
1072
1073done_unlock:
	mutex_unlock(&param->fault_param->lock);
1075 return ret;
1076}
1077EXPORT_SYMBOL_GPL(iommu_page_response);
1078
/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
1085int iommu_group_id(struct iommu_group *group)
1086{
1087 return group->id;
1088}
1089EXPORT_SYMBOL_GPL(iommu_group_id);
1090
1091static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1092 unsigned long *devfns);
1093
/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1103
/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
1110static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1111 unsigned long *devfns)
1112{
1113 struct pci_dev *tmp = NULL;
1114 struct iommu_group *group;
1115
1116 if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1117 return NULL;
1118
1119 for_each_pci_dev(tmp) {
1120 if (tmp == pdev || tmp->bus != pdev->bus ||
1121 PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1122 pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1123 continue;
1124
1125 group = get_pci_alias_group(tmp, devfns);
1126 if (group) {
1127 pci_dev_put(tmp);
1128 return group;
1129 }
1130 }
1131
1132 return NULL;
1133}
1134
/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, so the search space is quite
 * small.  The devfns bitmap tracks which functions have already been
 * visited, so the recursion through get_pci_function_alias_group()
 * terminates.
 */
1144static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1145 unsigned long *devfns)
1146{
1147 struct pci_dev *tmp = NULL;
1148 struct iommu_group *group;
1149
1150 if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1151 return NULL;
1152
1153 group = iommu_group_get(&pdev->dev);
1154 if (group)
1155 return group;
1156
1157 for_each_pci_dev(tmp) {
1158 if (tmp == pdev || tmp->bus != pdev->bus)
1159 continue;
1160
		/* We alias them or they alias us */
		if (pci_devs_are_dma_aliases(pdev, tmp)) {
1163 group = get_pci_alias_group(tmp, devfns);
1164 if (group) {
1165 pci_dev_put(tmp);
1166 return group;
1167 }
1168
1169 group = get_pci_function_alias_group(tmp, devfns);
1170 if (group) {
1171 pci_dev_put(tmp);
1172 return group;
1173 }
1174 }
1175 }
1176
1177 return NULL;
1178}
1179
1180struct group_for_pci_data {
1181 struct pci_dev *pdev;
1182 struct iommu_group *group;
1183};
1184
/*
 * DMA alias iterator callback, return the last seen device.  Stop and
 * return the IOMMU group if we find one along the way.
 */
1189static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1190{
1191 struct group_for_pci_data *data = opaque;
1192
1193 data->pdev = pdev;
1194 data->group = iommu_group_get(&pdev->dev);
1195
1196 return data->group != NULL;
1197}
1198
/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
1203struct iommu_group *generic_device_group(struct device *dev)
1204{
1205 return iommu_group_alloc();
1206}
1207
/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
1212struct iommu_group *pci_device_group(struct device *dev)
1213{
1214 struct pci_dev *pdev = to_pci_dev(dev);
1215 struct group_for_pci_data data;
1216 struct pci_bus *bus;
1217 struct iommu_group *group = NULL;
1218 u64 devfns[4] = { 0 };
1219
1220 if (WARN_ON(!dev_is_pci(dev)))
1221 return ERR_PTR(-EINVAL);
1222
	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
1229 if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1230 return data.group;
1231
1232 pdev = data.pdev;
1233
	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
1240 for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1241 if (!bus->self)
1242 continue;
1243
1244 if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1245 break;
1246
1247 pdev = bus->self;
1248
1249 group = iommu_group_get(&pdev->dev);
1250 if (group)
1251 return group;
1252 }
1253
	/*
	 * Look for existing groups on device aliases.  If we alias another
	 * device or another device aliases us, use the same group.
	 */
1258 group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1259 if (group)
1260 return group;
1261
	/*
	 * Look for existing groups on non-isolated functions on the same
	 * slot and aliases of those functions, if any.  No need to clear
	 * the search bitmap, the tested devfns are still valid.
	 */
1267 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1268 if (group)
1269 return group;
1270
	/* No shared group found, allocate new */
	return iommu_group_alloc();
1273}
1274
/* Get the IOMMU group for device on fsl-mc bus */
1276struct iommu_group *fsl_mc_device_group(struct device *dev)
1277{
1278 struct device *cont_dev = fsl_mc_cont_dev(dev);
1279 struct iommu_group *group;
1280
1281 group = iommu_group_get(cont_dev);
1282 if (!group)
1283 group = iommu_group_alloc();
1284 return group;
1285}
1286
/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
1297struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1298{
1299 const struct iommu_ops *ops = dev->bus->iommu_ops;
1300 struct iommu_group *group;
1301 int ret;
1302
1303 group = iommu_group_get(dev);
1304 if (group)
1305 return group;
1306
1307 if (!ops)
1308 return ERR_PTR(-EINVAL);
1309
1310 group = ops->device_group(dev);
1311 if (WARN_ON_ONCE(group == NULL))
1312 return ERR_PTR(-EINVAL);
1313
1314 if (IS_ERR(group))
1315 return group;
1316
	/*
	 * Try to allocate a default domain - needs support from the
	 * IOMMU driver.
	 */
1321 if (!group->default_domain) {
1322 struct iommu_domain *dom;
1323
1324 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1325 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1326 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1327 if (dom) {
1328 dev_warn(dev,
1329 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1330 iommu_def_domain_type);
1331 }
1332 }
1333
1334 group->default_domain = dom;
1335 if (!group->domain)
1336 group->domain = dom;
1337
1338 if (dom && !iommu_dma_strict) {
1339 int attr = 1;
1340 iommu_domain_set_attr(dom,
1341 DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
1342 &attr);
1343 }
1344 }
1345
1346 ret = iommu_group_add_device(group, dev);
1347 if (ret) {
1348 iommu_group_put(group);
1349 return ERR_PTR(ret);
1350 }
1351
1352 return group;
1353}
1354
1355struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1356{
1357 return group->default_domain;
1358}
1359
1360static int add_iommu_group(struct device *dev, void *data)
1361{
1362 int ret = iommu_probe_device(dev);
1363
	/*
	 * We ignore -ENODEV errors for now, as they just mean that the
	 * device is not translated by an IOMMU. We still care about other
	 * errors and fail to initialize when they happen.
	 */
1369 if (ret == -ENODEV)
1370 ret = 0;
1371
1372 return ret;
1373}
1374
1375static int remove_iommu_group(struct device *dev, void *data)
1376{
1377 iommu_release_device(dev);
1378
1379 return 0;
1380}
1381
1382static int iommu_bus_notifier(struct notifier_block *nb,
1383 unsigned long action, void *data)
1384{
1385 unsigned long group_action = 0;
1386 struct device *dev = data;
1387 struct iommu_group *group;
1388
	/*
	 * ADD/DEL call into iommu driver ops if provided, which may
	 * result in ADD/DEL notifiers to group->notifier
	 */
1393 if (action == BUS_NOTIFY_ADD_DEVICE) {
1394 int ret;
1395
1396 ret = iommu_probe_device(dev);
1397 return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1398 } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1399 iommu_release_device(dev);
1400 return NOTIFY_OK;
1401 }
1402
	/*
	 * Remaining BUS_NOTIFYs get filtered and republished to the
	 * group, if anyone is listening
	 */
1407 group = iommu_group_get(dev);
1408 if (!group)
1409 return 0;
1410
1411 switch (action) {
1412 case BUS_NOTIFY_BIND_DRIVER:
1413 group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1414 break;
1415 case BUS_NOTIFY_BOUND_DRIVER:
1416 group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1417 break;
1418 case BUS_NOTIFY_UNBIND_DRIVER:
1419 group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1420 break;
1421 case BUS_NOTIFY_UNBOUND_DRIVER:
1422 group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1423 break;
1424 }
1425
1426 if (group_action)
1427 blocking_notifier_call_chain(&group->notifier,
1428 group_action, dev);
1429
1430 iommu_group_put(group);
1431 return 0;
1432}
1433
1434static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1435{
1436 int err;
1437 struct notifier_block *nb;
1438
1439 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1440 if (!nb)
1441 return -ENOMEM;
1442
1443 nb->notifier_call = iommu_bus_notifier;
1444
1445 err = bus_register_notifier(bus, nb);
1446 if (err)
1447 goto out_free;
1448
1449 err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
1450 if (err)
1451 goto out_err;
1452
1453
1454 return 0;
1455
1456out_err:
1457
1458 bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1459 bus_unregister_notifier(bus, nb);
1460
1461out_free:
1462 kfree(nb);
1463
1464 return err;
1465}
1466
/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
1480int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1481{
1482 int err;
1483
1484 if (bus->iommu_ops != NULL)
1485 return -EBUSY;
1486
1487 bus->iommu_ops = ops;
1488
	/* Do IOMMU specific setup for this bus-type */
	err = iommu_bus_init(bus, ops);
1491 if (err)
1492 bus->iommu_ops = NULL;
1493
1494 return err;
1495}
1496EXPORT_SYMBOL_GPL(bus_set_iommu);
1497
1498bool iommu_present(struct bus_type *bus)
1499{
1500 return bus->iommu_ops != NULL;
1501}
1502EXPORT_SYMBOL_GPL(iommu_present);
1503
1504bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1505{
1506 if (!bus->iommu_ops || !bus->iommu_ops->capable)
1507 return false;
1508
1509 return bus->iommu_ops->capable(cap);
1510}
1511EXPORT_SYMBOL_GPL(iommu_capable);
1512
/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
1525void iommu_set_fault_handler(struct iommu_domain *domain,
1526 iommu_fault_handler_t handler,
1527 void *token)
1528{
1529 BUG_ON(!domain);
1530
1531 domain->handler = handler;
1532 domain->handler_token = token;
1533}
1534EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
1535
1536static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1537 unsigned type)
1538{
1539 struct iommu_domain *domain;
1540
1541 if (bus == NULL || bus->iommu_ops == NULL)
1542 return NULL;
1543
1544 domain = bus->iommu_ops->domain_alloc(type);
1545 if (!domain)
1546 return NULL;
1547
1548 domain->ops = bus->iommu_ops;
1549 domain->type = type;
1550
1551 domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
1552
1553 return domain;
1554}
1555
1556struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1557{
1558 return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1559}
1560EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1561
1562void iommu_domain_free(struct iommu_domain *domain)
1563{
1564 domain->ops->domain_free(domain);
1565}
1566EXPORT_SYMBOL_GPL(iommu_domain_free);
1567
1568static int __iommu_attach_device(struct iommu_domain *domain,
1569 struct device *dev)
1570{
1571 int ret;
1572 if ((domain->ops->is_attach_deferred != NULL) &&
1573 domain->ops->is_attach_deferred(domain, dev))
1574 return 0;
1575
1576 if (unlikely(domain->ops->attach_dev == NULL))
1577 return -ENODEV;
1578
1579 ret = domain->ops->attach_dev(domain, dev);
1580 if (!ret)
1581 trace_attach_device_to_domain(dev);
1582 return ret;
1583}
1584
1585int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1586{
1587 struct iommu_group *group;
1588 int ret;
1589
1590 group = iommu_group_get(dev);
1591 if (!group)
1592 return -ENODEV;
1593
	/*
	 * Lock the group to make sure the device-count doesn't
	 * change while we are attaching
	 */
	mutex_lock(&group->mutex);
1599 ret = -EINVAL;
1600 if (iommu_group_device_count(group) != 1)
1601 goto out_unlock;
1602
1603 ret = __iommu_attach_group(domain, group);
1604
1605out_unlock:
1606 mutex_unlock(&group->mutex);
1607 iommu_group_put(group);
1608
1609 return ret;
1610}
1611EXPORT_SYMBOL_GPL(iommu_attach_device);
1612
1613static void __iommu_detach_device(struct iommu_domain *domain,
1614 struct device *dev)
1615{
1616 if ((domain->ops->is_attach_deferred != NULL) &&
1617 domain->ops->is_attach_deferred(domain, dev))
1618 return;
1619
1620 if (unlikely(domain->ops->detach_dev == NULL))
1621 return;
1622
1623 domain->ops->detach_dev(domain, dev);
1624 trace_detach_device_from_domain(dev);
1625}
1626
1627void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1628{
1629 struct iommu_group *group;
1630
1631 group = iommu_group_get(dev);
1632 if (!group)
1633 return;
1634
1635 mutex_lock(&group->mutex);
1636 if (iommu_group_device_count(group) != 1) {
1637 WARN_ON(1);
1638 goto out_unlock;
1639 }
1640
1641 __iommu_detach_group(domain, group);
1642
1643out_unlock:
1644 mutex_unlock(&group->mutex);
1645 iommu_group_put(group);
1646}
1647EXPORT_SYMBOL_GPL(iommu_detach_device);
1648
1649struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1650{
1651 struct iommu_domain *domain;
1652 struct iommu_group *group;
1653
1654 group = iommu_group_get(dev);
1655 if (!group)
1656 return NULL;
1657
1658 domain = group->domain;
1659
1660 iommu_group_put(group);
1661
1662 return domain;
1663}
1664EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1665
/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */
1670struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1671{
1672 return dev->iommu_group->default_domain;
1673}
1674
/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
1685static int iommu_group_do_attach_device(struct device *dev, void *data)
1686{
1687 struct iommu_domain *domain = data;
1688
1689 return __iommu_attach_device(domain, dev);
1690}
1691
1692static int __iommu_attach_group(struct iommu_domain *domain,
1693 struct iommu_group *group)
1694{
1695 int ret;
1696
1697 if (group->default_domain && group->domain != group->default_domain)
1698 return -EBUSY;
1699
1700 ret = __iommu_group_for_each_dev(group, domain,
1701 iommu_group_do_attach_device);
1702 if (ret == 0)
1703 group->domain = domain;
1704
1705 return ret;
1706}
1707
1708int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1709{
1710 int ret;
1711
1712 mutex_lock(&group->mutex);
1713 ret = __iommu_attach_group(domain, group);
1714 mutex_unlock(&group->mutex);
1715
1716 return ret;
1717}
1718EXPORT_SYMBOL_GPL(iommu_attach_group);
1719
1720static int iommu_group_do_detach_device(struct device *dev, void *data)
1721{
1722 struct iommu_domain *domain = data;
1723
1724 __iommu_detach_device(domain, dev);
1725
1726 return 0;
1727}
1728
1729static void __iommu_detach_group(struct iommu_domain *domain,
1730 struct iommu_group *group)
1731{
1732 int ret;
1733
1734 if (!group->default_domain) {
1735 __iommu_group_for_each_dev(group, domain,
1736 iommu_group_do_detach_device);
1737 group->domain = NULL;
1738 return;
1739 }
1740
1741 if (group->domain == group->default_domain)
1742 return;
1743
	/* Detach by re-attaching to the default domain */
1745 ret = __iommu_group_for_each_dev(group, group->default_domain,
1746 iommu_group_do_attach_device);
1747 if (ret != 0)
1748 WARN_ON(1);
1749 else
1750 group->domain = group->default_domain;
1751}
1752
1753void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1754{
1755 mutex_lock(&group->mutex);
1756 __iommu_detach_group(domain, group);
1757 mutex_unlock(&group->mutex);
1758}
1759EXPORT_SYMBOL_GPL(iommu_detach_group);
1760
1761phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1762{
1763 if (unlikely(domain->ops->iova_to_phys == NULL))
1764 return 0;
1765
1766 return domain->ops->iova_to_phys(domain, iova);
1767}
1768EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1769
static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	/* Max page size that still fits into 'size' */
	pgsize_idx = __fls(size);

	/* need to consider alignment requirements ? */
	if (likely(addr_merge)) {
		/* Max page size allowed by address */
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	/* build a mask of acceptable page sizes */
	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	/* throw away page sizes not supported by the hardware */
	pgsize &= domain->pgsize_bitmap;

	/* make sure we're still sane */
	BUG_ON(!pgsize);

	/* pick the biggest page */
	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}
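/*
 * Worked example for iommu_pgsize(), assuming pgsize_bitmap = SZ_4K | SZ_2M:
 * for iova | paddr = 0x201000 and size = 0x400000, __fls(size) = 22 and
 * __ffs(addr_merge) = 12, so the candidate mask (1UL << 13) - 1 intersected
 * with the bitmap leaves only SZ_4K; the 2MB size cannot be used until the
 * addresses become 2MB-aligned after enough 4K steps.
 */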
1801
1802int iommu_map(struct iommu_domain *domain, unsigned long iova,
1803 phys_addr_t paddr, size_t size, int prot)
1804{
1805 const struct iommu_ops *ops = domain->ops;
1806 unsigned long orig_iova = iova;
1807 unsigned int min_pagesz;
1808 size_t orig_size = size;
1809 phys_addr_t orig_paddr = paddr;
1810 int ret = 0;
1811
1812 if (unlikely(ops->map == NULL ||
1813 domain->pgsize_bitmap == 0UL))
1814 return -ENODEV;
1815
1816 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1817 return -EINVAL;
1818
	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
1827 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1828 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1829 iova, &paddr, size, min_pagesz);
1830 return -EINVAL;
1831 }
1832
1833 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1834
1835 while (size) {
1836 size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1837
1838 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1839 iova, &paddr, pgsize);
1840
1841 ret = ops->map(domain, iova, paddr, pgsize, prot);
1842 if (ret)
1843 break;
1844
1845 iova += pgsize;
1846 paddr += pgsize;
1847 size -= pgsize;
1848 }
1849
1850 if (ops->iotlb_sync_map)
1851 ops->iotlb_sync_map(domain);
1852
	/* unroll mapping in case something went wrong */
1854 if (ret)
1855 iommu_unmap(domain, orig_iova, orig_size - size);
1856 else
1857 trace_map(orig_iova, orig_paddr, orig_size);
1858
1859 return ret;
1860}
1861EXPORT_SYMBOL_GPL(iommu_map);
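/*
 * A minimal usage sketch (illustrative only): a typical unmanaged-domain
 * user allocates a domain, attaches a device and maps a page.  "dev",
 * "iova" and "phys" are hypothetical, and error handling is elided.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, phys, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 */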
1862
1863static size_t __iommu_unmap(struct iommu_domain *domain,
1864 unsigned long iova, size_t size,
1865 bool sync)
1866{
1867 const struct iommu_ops *ops = domain->ops;
1868 size_t unmapped_page, unmapped = 0;
1869 unsigned long orig_iova = iova;
1870 unsigned int min_pagesz;
1871
1872 if (unlikely(ops->unmap == NULL ||
1873 domain->pgsize_bitmap == 0UL))
1874 return 0;
1875
1876 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1877 return 0;
1878
	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
1887 if (!IS_ALIGNED(iova | size, min_pagesz)) {
1888 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1889 iova, size, min_pagesz);
1890 return 0;
1891 }
1892
1893 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1894
	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
1899 while (unmapped < size) {
1900 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1901
1902 unmapped_page = ops->unmap(domain, iova, pgsize);
1903 if (!unmapped_page)
1904 break;
1905
1906 if (sync && ops->iotlb_range_add)
1907 ops->iotlb_range_add(domain, iova, pgsize);
1908
1909 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
1910 iova, unmapped_page);
1911
1912 iova += unmapped_page;
1913 unmapped += unmapped_page;
1914 }
1915
1916 if (sync && ops->iotlb_sync)
1917 ops->iotlb_sync(domain);
1918
1919 trace_unmap(orig_iova, size, unmapped);
1920 return unmapped;
1921}
1922
1923size_t iommu_unmap(struct iommu_domain *domain,
1924 unsigned long iova, size_t size)
1925{
1926 return __iommu_unmap(domain, iova, size, true);
1927}
1928EXPORT_SYMBOL_GPL(iommu_unmap);
1929
1930size_t iommu_unmap_fast(struct iommu_domain *domain,
1931 unsigned long iova, size_t size)
1932{
1933 return __iommu_unmap(domain, iova, size, false);
1934}
1935EXPORT_SYMBOL_GPL(iommu_unmap_fast);
1936
1937size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1938 struct scatterlist *sg, unsigned int nents, int prot)
1939{
1940 size_t len = 0, mapped = 0;
1941 phys_addr_t start;
1942 unsigned int i = 0;
1943 int ret;
1944
1945 while (i <= nents) {
1946 phys_addr_t s_phys = sg_phys(sg);
1947
1948 if (len && s_phys != start + len) {
1949 ret = iommu_map(domain, iova + mapped, start, len, prot);
1950 if (ret)
1951 goto out_err;
1952
1953 mapped += len;
1954 len = 0;
1955 }
1956
1957 if (len) {
1958 len += sg->length;
1959 } else {
1960 len = sg->length;
1961 start = s_phys;
1962 }
1963
1964 if (++i < nents)
1965 sg = sg_next(sg);
1966 }
1967
1968 return mapped;
1969
1970out_err:
	/* undo mappings already done */
1972 iommu_unmap(domain, iova, mapped);
1973
1974 return 0;
1975
1976}
1977EXPORT_SYMBOL_GPL(iommu_map_sg);
1978
1979int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
1980 phys_addr_t paddr, u64 size, int prot)
1981{
1982 if (unlikely(domain->ops->domain_window_enable == NULL))
1983 return -ENODEV;
1984
1985 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
1986 prot);
1987}
1988EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
1989
1990void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
1991{
1992 if (unlikely(domain->ops->domain_window_disable == NULL))
1993 return;
1994
1995 return domain->ops->domain_window_disable(domain, wnd_nr);
1996}
1997EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
1998
/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elaborate in a more specific way).
 */
2023int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2024 unsigned long iova, int flags)
2025{
2026 int ret = -ENOSYS;
2027
	/*
	 * if upper layers showed interest and installed a fault handler,
	 * invoke it.
	 */
2032 if (domain->handler)
2033 ret = domain->handler(domain, dev, iova, flags,
2034 domain->handler_token);
2035
2036 trace_io_page_fault(dev, iova, flags);
2037 return ret;
2038}
2039EXPORT_SYMBOL_GPL(report_iommu_fault);
2040
2041static int __init iommu_init(void)
2042{
2043 iommu_group_kset = kset_create_and_add("iommu_groups",
2044 NULL, kernel_kobj);
2045 BUG_ON(!iommu_group_kset);
2046
2047 iommu_debugfs_setup();
2048
2049 return 0;
2050}
2051core_initcall(iommu_init);
2052
2053int iommu_domain_get_attr(struct iommu_domain *domain,
2054 enum iommu_attr attr, void *data)
2055{
2056 struct iommu_domain_geometry *geometry;
2057 bool *paging;
2058 int ret = 0;
2059
2060 switch (attr) {
2061 case DOMAIN_ATTR_GEOMETRY:
2062 geometry = data;
2063 *geometry = domain->geometry;
2064
2065 break;
2066 case DOMAIN_ATTR_PAGING:
2067 paging = data;
2068 *paging = (domain->pgsize_bitmap != 0UL);
2069 break;
2070 default:
2071 if (!domain->ops->domain_get_attr)
2072 return -EINVAL;
2073
2074 ret = domain->ops->domain_get_attr(domain, attr, data);
2075 }
2076
2077 return ret;
2078}
2079EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
2080
2081int iommu_domain_set_attr(struct iommu_domain *domain,
2082 enum iommu_attr attr, void *data)
2083{
2084 int ret = 0;
2085
2086 switch (attr) {
2087 default:
2088 if (domain->ops->domain_set_attr == NULL)
2089 return -EINVAL;
2090
2091 ret = domain->ops->domain_set_attr(domain, attr, data);
2092 }
2093
2094 return ret;
2095}
2096EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2097
2098void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2099{
2100 const struct iommu_ops *ops = dev->bus->iommu_ops;
2101
2102 if (ops && ops->get_resv_regions)
2103 ops->get_resv_regions(dev, list);
2104}
2105
2106void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2107{
2108 const struct iommu_ops *ops = dev->bus->iommu_ops;
2109
2110 if (ops && ops->put_resv_regions)
2111 ops->put_resv_regions(dev, list);
2112}
2113
2114struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2115 size_t length, int prot,
2116 enum iommu_resv_type type)
2117{
2118 struct iommu_resv_region *region;
2119
2120 region = kzalloc(sizeof(*region), GFP_KERNEL);
2121 if (!region)
2122 return NULL;
2123
	INIT_LIST_HEAD(&region->list);
2125 region->start = start;
2126 region->length = length;
2127 region->prot = prot;
2128 region->type = type;
2129 return region;
2130}
2131
2132static int
2133request_default_domain_for_dev(struct device *dev, unsigned long type)
2134{
2135 struct iommu_domain *domain;
2136 struct iommu_group *group;
2137 int ret;
2138
	/* Device must already be in a group before calling this function */
	group = iommu_group_get(dev);
2141 if (!group)
2142 return -EINVAL;
2143
2144 mutex_lock(&group->mutex);
2145
	/* Check if the default domain is already of the requested type */
	ret = 0;
2148 if (group->default_domain && group->default_domain->type == type)
2149 goto out;
2150
	/* Don't change mappings of existing devices */
	ret = -EBUSY;
2153 if (iommu_group_device_count(group) != 1)
2154 goto out;
2155
	/* Allocate a new domain of the requested type */
	ret = -ENOMEM;
2158 domain = __iommu_domain_alloc(dev->bus, type);
2159 if (!domain)
2160 goto out;
2161
	/* Attach the device to the new domain */
	ret = __iommu_attach_group(domain, group);
2164 if (ret) {
2165 iommu_domain_free(domain);
2166 goto out;
2167 }
2168
2169 iommu_group_create_direct_mappings(group, dev);
2170
	/* Make the new domain the default for this group */
	if (group->default_domain)
2173 iommu_domain_free(group->default_domain);
2174 group->default_domain = domain;
2175
2176 dev_info(dev, "Using iommu %s mapping\n",
2177 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2178
2179 ret = 0;
2180out:
2181 mutex_unlock(&group->mutex);
2182 iommu_group_put(group);
2183
2184 return ret;
2185}
2186
/* Request that a device is direct mapped by the IOMMU */
2188int iommu_request_dm_for_dev(struct device *dev)
2189{
2190 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2191}
2192
/* Request that a device can't be direct mapped by the IOMMU */
2194int iommu_request_dma_domain_for_dev(struct device *dev)
2195{
2196 return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2197}
2198
2199const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2200{
2201 const struct iommu_ops *ops = NULL;
2202 struct iommu_device *iommu;
2203
2204 spin_lock(&iommu_device_lock);
2205 list_for_each_entry(iommu, &iommu_device_list, list)
2206 if (iommu->fwnode == fwnode) {
2207 ops = iommu->ops;
2208 break;
2209 }
2210 spin_unlock(&iommu_device_lock);
2211 return ops;
2212}
2213
2214int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2215 const struct iommu_ops *ops)
2216{
2217 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2218
2219 if (fwspec)
2220 return ops == fwspec->ops ? 0 : -EINVAL;
2221
2222 fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
2223 if (!fwspec)
2224 return -ENOMEM;
2225
2226 of_node_get(to_of_node(iommu_fwnode));
2227 fwspec->iommu_fwnode = iommu_fwnode;
2228 fwspec->ops = ops;
2229 dev_iommu_fwspec_set(dev, fwspec);
2230 return 0;
2231}
2232EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2233
2234void iommu_fwspec_free(struct device *dev)
2235{
2236 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2237
2238 if (fwspec) {
2239 fwnode_handle_put(fwspec->iommu_fwnode);
2240 kfree(fwspec);
2241 dev_iommu_fwspec_set(dev, NULL);
2242 }
2243}
2244EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2245
2246int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2247{
2248 struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2249 size_t size;
2250 int i;
2251
2252 if (!fwspec)
2253 return -EINVAL;
2254
2255 size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
2256 if (size > sizeof(*fwspec)) {
2257 fwspec = krealloc(fwspec, size, GFP_KERNEL);
2258 if (!fwspec)
2259 return -ENOMEM;
2260
2261 dev_iommu_fwspec_set(dev, fwspec);
2262 }
2263
2264 for (i = 0; i < num_ids; i++)
2265 fwspec->ids[fwspec->num_ids + i] = ids[i];
2266
2267 fwspec->num_ids += num_ids;
2268 return 0;
2269}
2270EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
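/*
 * A minimal usage sketch (illustrative only): an IOMMU driver's of_xlate()
 * callback typically records the firmware description using the two helpers
 * above.  The name my_iommu_ops is a placeholder.
 *
 *	static int my_of_xlate(struct device *dev, struct of_phandle_args *args)
 *	{
 *		int ret;
 *
 *		ret = iommu_fwspec_init(dev, &args->np->fwnode, &my_iommu_ops);
 *		if (ret)
 *			return ret;
 *
 *		return iommu_fwspec_add_ids(dev, args->args, 1);
 *	}
 */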
2271
/*
 * Per device IOMMU features.
 */
2275bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2276{
2277 const struct iommu_ops *ops = dev->bus->iommu_ops;
2278
2279 if (ops && ops->dev_has_feat)
2280 return ops->dev_has_feat(dev, feat);
2281
2282 return false;
2283}
2284EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2285
2286int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2287{
2288 const struct iommu_ops *ops = dev->bus->iommu_ops;
2289
2290 if (ops && ops->dev_enable_feat)
2291 return ops->dev_enable_feat(dev, feat);
2292
2293 return -ENODEV;
2294}
2295EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2296
/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains. Otherwise, this will return -EBUSY.
 */
2302int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2303{
2304 const struct iommu_ops *ops = dev->bus->iommu_ops;
2305
2306 if (ops && ops->dev_disable_feat)
2307 return ops->dev_disable_feat(dev, feat);
2308
2309 return -EBUSY;
2310}
2311EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2312
2313bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2314{
2315 const struct iommu_ops *ops = dev->bus->iommu_ops;
2316
2317 if (ops && ops->dev_feat_enabled)
2318 return ops->dev_feat_enabled(dev, feat);
2319
2320 return false;
2321}
2322EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2323
/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true. Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */
2334int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2335{
2336 int ret = -ENODEV;
2337
2338 if (domain->ops->aux_attach_dev)
2339 ret = domain->ops->aux_attach_dev(domain, dev);
2340
2341 if (!ret)
2342 trace_attach_device_to_domain(dev);
2343
2344 return ret;
2345}
2346EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2347
2348void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2349{
2350 if (domain->ops->aux_detach_dev) {
2351 domain->ops->aux_detach_dev(domain, dev);
2352 trace_detach_device_from_domain(dev);
2353 }
2354}
2355EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2356
2357int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2358{
2359 int ret = -ENODEV;
2360
2361 if (domain->ops->aux_get_pasid)
2362 ret = domain->ops->aux_get_pasid(domain, dev);
2363
2364 return ret;
2365}
2366EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
2367
/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the bind callback
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID. If a bond already exists between
 * @dev and @mm, it is returned and an additional reference is taken. Caller
 * must call iommu_sva_unbind_device() to release each reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first,
 * to initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */
2383struct iommu_sva *
2384iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2385{
2386 struct iommu_group *group;
2387 struct iommu_sva *handle = ERR_PTR(-EINVAL);
2388 const struct iommu_ops *ops = dev->bus->iommu_ops;
2389
2390 if (!ops || !ops->sva_bind)
2391 return ERR_PTR(-ENODEV);
2392
2393 group = iommu_group_get(dev);
2394 if (!group)
2395 return ERR_PTR(-ENODEV);
2396
	/* Ensure device count and domain don't change while we're binding */
	mutex_lock(&group->mutex);

	/*
	 * To keep things simple, SVA currently doesn't support IOMMU
	 * groups with more than one device. Existing SVA-capable systems
	 * are not affected by the problems that required IOMMU groups
	 * (lack of ACS isolation, device ID aliasing and other hardware
	 * issues).
	 */
	if (iommu_group_device_count(group) != 1)
2407 goto out_unlock;
2408
2409 handle = ops->sva_bind(dev, mm, drvdata);
2410
2411out_unlock:
2412 mutex_unlock(&group->mutex);
2413 iommu_group_put(group);
2414
2415 return handle;
2416}
2417EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2418
/**
 * iommu_sva_unbind_device() - Remove a bond created with
 *                             iommu_sva_bind_device()
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All
 * outstanding page requests for this PASID must have been flushed to the
 * IOMMU.
 */
2429void iommu_sva_unbind_device(struct iommu_sva *handle)
2430{
2431 struct iommu_group *group;
2432 struct device *dev = handle->dev;
2433 const struct iommu_ops *ops = dev->bus->iommu_ops;
2434
2435 if (!ops || !ops->sva_unbind)
2436 return;
2437
2438 group = iommu_group_get(dev);
2439 if (!group)
2440 return;
2441
2442 mutex_lock(&group->mutex);
2443 ops->sva_unbind(handle);
2444 mutex_unlock(&group->mutex);
2445
2446 iommu_group_put(group);
2447}
2448EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2449
2450int iommu_sva_set_ops(struct iommu_sva *handle,
2451 const struct iommu_sva_ops *sva_ops)
2452{
2453 if (handle->ops && handle->ops != sva_ops)
2454 return -EEXIST;
2455
2456 handle->ops = sva_ops;
2457 return 0;
2458}
2459EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2460
2461int iommu_sva_get_pasid(struct iommu_sva *handle)
2462{
2463 const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2464
2465 if (!ops || !ops->sva_get_pasid)
2466 return IOMMU_PASID_INVALID;
2467
2468 return ops->sva_get_pasid(handle);
2469}
2470EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
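/*
 * A minimal usage sketch (illustrative only): bind the current process
 * address space and retrieve the PASID to program into a device context.
 *
 *	struct iommu_sva *handle;
 *	int pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 */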
2471