#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
};

struct iommu_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        mutex_lock(&iommu_group_mutex);
        ida_remove(&iommu_group_ida, group->id);
        mutex_unlock(&iommu_group_mutex);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};
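/*
 * iommu_group_alloc - Allocate and register a new, empty iommu group.
 *
 * Returns the group on success, or an ERR_PTR on failure.  The group is
 * exposed under /sys/kernel/iommu_groups/<id> and the caller holds a
 * reference that must eventually be dropped with iommu_group_put().
 */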
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        mutex_lock(&iommu_group_mutex);

again:
        if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
                kfree(group);
                mutex_unlock(&iommu_group_mutex);
                return ERR_PTR(-ENOMEM);
        }

        if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
                goto again;

        mutex_unlock(&iommu_group_mutex);

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                mutex_lock(&iommu_group_mutex);
                ida_remove(&iommu_group_ida, group->id);
                mutex_unlock(&iommu_group_mutex);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj);
                return ERR_PTR(-ENOMEM);
        }
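        /*
         * The devices_kobj created above holds a reference on the group
         * kobject (it is its parent), so the group stays alive for as
         * long as devices_kobj does.  Drop the extra reference taken by
         * kobject_init_and_add() and use devices_kobj as the group's
         * effective reference count from here on.
         */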
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
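/*
 * iommu_group_get_iommudata - Retrieve the iommu_data registered for a group.
 * @group: the group
 *
 * iommu drivers can store private data in the group with
 * iommu_group_set_iommudata(); this returns that data to the caller.
 */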
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
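/*
 * iommu_group_set_iommudata - Attach driver-private data to a group.
 * @group: the group
 * @iommu_data: opaque data to associate with the group
 * @release: callback invoked when the group is released, used to free
 *           @iommu_data
 */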
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
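/*
 * iommu_group_set_name - Give the group a human-readable name.
 * @group: the group
 * @name: name to expose via the sysfs "name" attribute, or NULL to
 *        remove an existing name
 */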
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);
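/*
 * iommu_group_add_device - Add a device to an iommu group.
 * @group: the group
 * @dev: the device
 *
 * Creates the sysfs links between the device and the group, adds the
 * device to the group's device list and notifies any listeners on the
 * group's notifier chain.  Returns 0 on success or a negative errno.
 */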
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct iommu_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret) {
                kfree(device);
                return ret;
        }

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return -ENOMEM;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                kfree(device->name);
                if (ret == -EEXIST && i >= 0) {
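                        /*
                         * A link with this name already exists in the
                         * group directory; retry with "<name>.<i>"
                         * appended.  The i >= 0 test simply stops
                         * retrying if the counter ever overflows.
                         */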
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }

                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return ret;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        mutex_unlock(&group->mutex);

        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
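/*
 * iommu_group_remove_device - Remove a device from its iommu group.
 * @dev: the device
 *
 * Undoes iommu_group_add_device(): listeners are notified, the sysfs
 * links are removed and the group reference taken on behalf of the
 * device is dropped.
 */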
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct iommu_device *tmp_device, *device = NULL;
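        /* Tell any listeners up front that the device is going away. */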
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
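/*
 * iommu_group_for_each_dev - Iterate over the devices in a group.
 * @group: the group
 * @data: caller-opaque data, passed to the callback
 * @fn: callback invoked for each device in the group
 *
 * Iteration stops early if the callback returns a non-zero value, which
 * is then returned to the caller.  The group mutex is held across the
 * walk, so @fn must not add or remove group devices itself.
 */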
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        struct iommu_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
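/*
 * iommu_group_get - Return the group for a device and take a reference.
 * @dev: the device
 *
 * Returns NULL if the device is not in a group.  Otherwise the caller
 * must drop the reference with iommu_group_put().
 */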
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);
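/*
 * iommu_group_put - Drop a group reference taken by iommu_group_get()
 * or iommu_group_alloc().
 */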
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
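/*
 * iommu_group_register_notifier - Register for group change notifications.
 * @group: the group
 * @nb: notifier block to add to the group's blocking notifier chain
 *
 * Listeners receive IOMMU_GROUP_NOTIFY_* events for device add/remove
 * and driver bind/unbind activity within the group.
 */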
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
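/*
 * iommu_group_unregister_notifier - Remove a notifier previously added
 * with iommu_group_register_notifier().
 */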
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
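/*
 * iommu_group_id - Return the numeric ID of a group, matching its name
 * under /sys/kernel/iommu_groups/.
 */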
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static int add_iommu_group(struct device *dev, void *data)
{
        struct iommu_ops *ops = data;

        if (!ops->add_device)
                return -ENODEV;

        WARN_ON(dev->iommu_group);

        ops->add_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;
        struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        unsigned long group_action = 0;
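        /*
         * ADD/DEL go straight to the iommu driver's add_device and
         * remove_device callbacks; these typically create or tear down
         * the device's group, so they are handled before the group
         * notifications below.
         */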
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
                }
        }
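        /*
         * The remaining bus notifications are filtered and republished
         * on the group's own notifier chain, so group users can track
         * driver bind and unbind events for member devices.
         */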
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static struct notifier_block iommu_bus_nb = {
        .notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_bus_nb);
        bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}
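/*
 * bus_set_iommu - Register an iommu implementation for a bus type.
 * @bus: the bus
 * @ops: the iommu callbacks, e.g. from an AMD-Vi or VT-d driver
 *
 * May only be called once per bus.  A bus notifier is installed so that
 * devices added to or removed from the bus later are reported to the
 * iommu driver, and all devices already on the bus are passed to
 * ops->add_device().  Returns -EBUSY if iommu_ops are already set.
 */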
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;
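        /* Hook up the bus notifier and handle already-present devices. */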
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
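/*
 * iommu_set_fault_handler - Install a fault callback for a domain.
 * @domain: the domain
 * @handler: called on iommu faults for this domain, or NULL to clear
 * @token: opaque cookie passed back to the handler
 */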
void iommu_set_fault_handler(struct iommu_domain *domain,
                             iommu_fault_handler_t handler,
                             void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
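/*
 * The group is the natural unit of IOMMU ownership, but the attach API
 * above operates on domains and devices.  These helpers bridge the gap
 * by applying attach/detach to every device in a group.
 */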
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        return iommu_group_for_each_dev(group, domain,
                                        iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        iommu_detach_device(domain, dev);

        return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
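/*
 * iommu_pgsize - Pick the largest hardware-supported page size that fits.
 * @addr_merge: the OR of the addresses involved (e.g. iova | paddr),
 *              used to honour address alignment; may be 0
 * @size: remaining bytes to map or unmap
 *
 * Returns the biggest page size from domain->ops->pgsize_bitmap that is
 * no larger than @size and, when @addr_merge is non-zero, no larger than
 * the alignment of the addresses being mapped.
 */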
static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        pgsize_idx = __fls(size);

        if (likely(addr_merge)) {
                unsigned int align_pgsize_idx = __ffs(addr_merge);
                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        pgsize &= domain->ops->pgsize_bitmap;

        BUG_ON(!pgsize);

        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}
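/*
 * iommu_map - Map a physically contiguous range into a domain.
 * @iova: domain virtual address of the mapping
 * @paddr: physical address to map
 * @size: size in bytes; iova, paddr and size must all be aligned to the
 *        smallest page size the hardware supports
 * @prot: IOMMU_READ/IOMMU_WRITE/... protection flags
 *
 * The range is split into the largest page sizes the hardware accepts.
 * On failure, anything mapped so far is torn down again.
 */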
int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL ||
                     domain->ops->unmap == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;

        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
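        /*
         * Both the virtual and the physical address, as well as the
         * size, must be aligned to at least the smallest page size
         * supported by the hardware; otherwise the split into hardware
         * pages would spill outside the requested range.
         */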
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }
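        /* Undo any partial mapping if one of the chunks failed. */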
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
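/*
 * iommu_unmap - Unmap a previously mapped range from a domain.
 * @iova: domain virtual address; must be aligned to the smallest
 *        hardware page size, as must @size
 * @size: size in bytes to unmap
 *
 * Returns the number of bytes actually unmapped, which may be less than
 * @size if an unmapped hole is encountered.
 */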
size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;

        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
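        /*
         * The address and size must be aligned to at least the smallest
         * page size supported by the hardware, for the same reason as in
         * iommu_map().
         */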
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
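        /*
         * Keep iterating until we either unmap @size bytes or hit an
         * area that is not mapped, in which case ops->unmap() returns 0.
         */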
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
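/*
 * Window-based domains (hardware that maps through a set of fixed
 * subwindows rather than a page table, e.g. Freescale's PAMU) are
 * configured through the two hooks below.
 */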
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        ida_init(&iommu_group_ida);
        mutex_init(&iommu_group_mutex);

        BUG_ON(!iommu_group_kset);

        return 0;
}
arch_initcall(iommu_init);
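/*
 * Generic get/set of domain attributes.  A few common attributes
 * (geometry, paging capability, window count) are handled here; anything
 * else is forwarded to the iommu driver's domain_{get,set}_attr hooks.
 */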
int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging = data;
                *paging = (domain->ops->pgsize_bitmap != 0UL);
                break;
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_get_windows != NULL)
                        *count = domain->ops->domain_get_windows(domain);
                else
                        ret = -ENODEV;

                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_set_windows != NULL)
                        ret = domain->ops->domain_set_windows(domain, *count);
                else
                        ret = -ENODEV;

                break;
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);