/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */
7#ifndef __LINUX_IOMMU_H
8#define __LINUX_IOMMU_H
9
10#include <linux/scatterlist.h>
11#include <linux/device.h>
12#include <linux/types.h>
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/of.h>
16#include <linux/ioasid.h>
17#include <uapi/linux/iommu.h>
18
19#define IOMMU_READ (1 << 0)
20#define IOMMU_WRITE (1 << 1)
21#define IOMMU_CACHE (1 << 2)
22#define IOMMU_NOEXEC (1 << 3)
23#define IOMMU_MMIO (1 << 4)
24
25
26
27
28
29
30
31
32
33#define IOMMU_PRIV (1 << 5)
34
35struct iommu_ops;
36struct iommu_group;
37struct bus_type;
38struct device;
39struct iommu_domain;
40struct notifier_block;
41struct iommu_sva;
42struct iommu_fault_event;
43struct iommu_dma_cookie;
44
45
46#define IOMMU_FAULT_READ 0x0
47#define IOMMU_FAULT_WRITE 0x1
48
49typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
50 struct device *, unsigned long, int, void *);
51typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
52
53struct iommu_domain_geometry {
54 dma_addr_t aperture_start;
55 dma_addr_t aperture_end;
56 bool force_aperture;
57};
58
59
60#define __IOMMU_DOMAIN_PAGING (1U << 0)
61#define __IOMMU_DOMAIN_DMA_API (1U << 1)
62
63#define __IOMMU_DOMAIN_PT (1U << 2)
64#define __IOMMU_DOMAIN_DMA_FQ (1U << 3)
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80#define IOMMU_DOMAIN_BLOCKED (0U)
81#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
82#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
83#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
84 __IOMMU_DOMAIN_DMA_API)
85#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \
86 __IOMMU_DOMAIN_DMA_API | \
87 __IOMMU_DOMAIN_DMA_FQ)
88
89struct iommu_domain {
90 unsigned type;
91 const struct iommu_ops *ops;
92 unsigned long pgsize_bitmap;
93 iommu_fault_handler_t handler;
94 void *handler_token;
95 struct iommu_domain_geometry geometry;
96 struct iommu_dma_cookie *iova_cookie;
97};
98
99static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
100{
101 return domain->type & __IOMMU_DOMAIN_DMA_API;
102}
103
/* Capabilities a bus-level IOMMU may report via iommu_capable(). */
enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};
110
111
/* Types of reserved IOVA regions reported through get_resv_regions(). */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};
128
129
130
131
132
133
134
135
136
137struct iommu_resv_region {
138 struct list_head list;
139 phys_addr_t start;
140 size_t length;
141 int prot;
142 enum iommu_resv_type type;
143};
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults (e.g. PRI or stall); generally
 *			 enabling this feature is a prerequisite for SVA
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};
164
165#define IOMMU_PASID_INVALID (-1U)
166
167#ifdef CONFIG_IOMMU_API
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185struct iommu_iotlb_gather {
186 unsigned long start;
187 unsigned long end;
188 size_t pgsize;
189 struct page *freelist;
190 bool queued;
191};
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242struct iommu_ops {
243 bool (*capable)(enum iommu_cap);
244
245
246 struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
247 void (*domain_free)(struct iommu_domain *);
248
249 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
250 void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
251 int (*map)(struct iommu_domain *domain, unsigned long iova,
252 phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
253 int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
254 phys_addr_t paddr, size_t pgsize, size_t pgcount,
255 int prot, gfp_t gfp, size_t *mapped);
256 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
257 size_t size, struct iommu_iotlb_gather *iotlb_gather);
258 size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
259 size_t pgsize, size_t pgcount,
260 struct iommu_iotlb_gather *iotlb_gather);
261 void (*flush_iotlb_all)(struct iommu_domain *domain);
262 void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
263 size_t size);
264 void (*iotlb_sync)(struct iommu_domain *domain,
265 struct iommu_iotlb_gather *iotlb_gather);
266 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
267 struct iommu_device *(*probe_device)(struct device *dev);
268 void (*release_device)(struct device *dev);
269 void (*probe_finalize)(struct device *dev);
270 struct iommu_group *(*device_group)(struct device *dev);
271 int (*enable_nesting)(struct iommu_domain *domain);
272 int (*set_pgtable_quirks)(struct iommu_domain *domain,
273 unsigned long quirks);
274
275
276 void (*get_resv_regions)(struct device *dev, struct list_head *list);
277 void (*put_resv_regions)(struct device *dev, struct list_head *list);
278 void (*apply_resv_region)(struct device *dev,
279 struct iommu_domain *domain,
280 struct iommu_resv_region *region);
281
282 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
283 bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
284
285
286 bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
287 bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
288 int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
289 int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
290
291
292 int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
293 void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
294 int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
295
296 struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
297 void *drvdata);
298 void (*sva_unbind)(struct iommu_sva *handle);
299 u32 (*sva_get_pasid)(struct iommu_sva *handle);
300
301 int (*page_response)(struct device *dev,
302 struct iommu_fault_event *evt,
303 struct iommu_page_response *msg);
304 int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
305 struct iommu_cache_invalidate_info *inv_info);
306 int (*sva_bind_gpasid)(struct iommu_domain *domain,
307 struct device *dev, struct iommu_gpasid_bind_data *data);
308
309 int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);
310
311 int (*def_domain_type)(struct device *dev);
312
313 unsigned long pgsize_bitmap;
314 struct module *owner;
315};
316
317
318
319
320
321
322
323
324struct iommu_device {
325 struct list_head list;
326 const struct iommu_ops *ops;
327 struct fwnode_handle *fwnode;
328 struct device *dev;
329};
330
331
332
333
334
335
336
337
338
339
340struct iommu_fault_event {
341 struct iommu_fault fault;
342 struct list_head list;
343};
344
345
346
347
348
349
350
351
352struct iommu_fault_param {
353 iommu_dev_fault_handler_t handler;
354 void *data;
355 struct list_head faults;
356 struct mutex lock;
357};
358
359
360
361
362
363
364
365
366
367
368
369
370
371struct dev_iommu {
372 struct mutex lock;
373 struct iommu_fault_param *fault_param;
374 struct iopf_device_param *iopf_param;
375 struct iommu_fwspec *fwspec;
376 struct iommu_device *iommu_dev;
377 void *priv;
378};
379
380int iommu_device_register(struct iommu_device *iommu,
381 const struct iommu_ops *ops,
382 struct device *hwdev);
383void iommu_device_unregister(struct iommu_device *iommu);
384int iommu_device_sysfs_add(struct iommu_device *iommu,
385 struct device *parent,
386 const struct attribute_group **groups,
387 const char *fmt, ...) __printf(4, 5);
388void iommu_device_sysfs_remove(struct iommu_device *iommu);
389int iommu_device_link(struct iommu_device *iommu, struct device *link);
390void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
391int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
392
/*
 * Retrieve the struct iommu_device stashed in @dev's drvdata.
 * NOTE(review): only meaningful for devices registered through
 * iommu_device_sysfs_add(), which presumably sets the drvdata — confirm
 * against the caller before using elsewhere.
 */
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}
397
398static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
399{
400 *gather = (struct iommu_iotlb_gather) {
401 .start = ULONG_MAX,
402 };
403}
404
405#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1
406#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2
407#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3
408#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4
409#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5
410#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6
411
412extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
413extern int bus_iommu_probe(struct bus_type *bus);
414extern bool iommu_present(struct bus_type *bus);
415extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
416extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
417extern struct iommu_group *iommu_group_get_by_id(int id);
418extern void iommu_domain_free(struct iommu_domain *domain);
419extern int iommu_attach_device(struct iommu_domain *domain,
420 struct device *dev);
421extern void iommu_detach_device(struct iommu_domain *domain,
422 struct device *dev);
423extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
424 struct device *dev,
425 void __user *uinfo);
426
427extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
428 struct device *dev, void __user *udata);
429extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
430 struct device *dev, void __user *udata);
431extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
432 struct device *dev, ioasid_t pasid);
433extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
434extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
435extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
436 phys_addr_t paddr, size_t size, int prot);
437extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
438 phys_addr_t paddr, size_t size, int prot);
439extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
440 size_t size);
441extern size_t iommu_unmap_fast(struct iommu_domain *domain,
442 unsigned long iova, size_t size,
443 struct iommu_iotlb_gather *iotlb_gather);
444extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
445 struct scatterlist *sg, unsigned int nents, int prot);
446extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
447 unsigned long iova, struct scatterlist *sg,
448 unsigned int nents, int prot);
449extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
450extern void iommu_set_fault_handler(struct iommu_domain *domain,
451 iommu_fault_handler_t handler, void *token);
452
453extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
454extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
455extern void generic_iommu_put_resv_regions(struct device *dev,
456 struct list_head *list);
457extern void iommu_set_default_passthrough(bool cmd_line);
458extern void iommu_set_default_translated(bool cmd_line);
459extern bool iommu_default_passthrough(void);
460extern struct iommu_resv_region *
461iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
462 enum iommu_resv_type type);
463extern int iommu_get_group_resv_regions(struct iommu_group *group,
464 struct list_head *head);
465
466extern int iommu_attach_group(struct iommu_domain *domain,
467 struct iommu_group *group);
468extern void iommu_detach_group(struct iommu_domain *domain,
469 struct iommu_group *group);
470extern struct iommu_group *iommu_group_alloc(void);
471extern void *iommu_group_get_iommudata(struct iommu_group *group);
472extern void iommu_group_set_iommudata(struct iommu_group *group,
473 void *iommu_data,
474 void (*release)(void *iommu_data));
475extern int iommu_group_set_name(struct iommu_group *group, const char *name);
476extern int iommu_group_add_device(struct iommu_group *group,
477 struct device *dev);
478extern void iommu_group_remove_device(struct device *dev);
479extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
480 int (*fn)(struct device *, void *));
481extern struct iommu_group *iommu_group_get(struct device *dev);
482extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
483extern void iommu_group_put(struct iommu_group *group);
484extern int iommu_group_register_notifier(struct iommu_group *group,
485 struct notifier_block *nb);
486extern int iommu_group_unregister_notifier(struct iommu_group *group,
487 struct notifier_block *nb);
488extern int iommu_register_device_fault_handler(struct device *dev,
489 iommu_dev_fault_handler_t handler,
490 void *data);
491
492extern int iommu_unregister_device_fault_handler(struct device *dev);
493
494extern int iommu_report_device_fault(struct device *dev,
495 struct iommu_fault_event *evt);
496extern int iommu_page_response(struct device *dev,
497 struct iommu_page_response *msg);
498
499extern int iommu_group_id(struct iommu_group *group);
500extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
501
502int iommu_enable_nesting(struct iommu_domain *domain);
503int iommu_set_pgtable_quirks(struct iommu_domain *domain,
504 unsigned long quirks);
505
506void iommu_set_dma_strict(void);
507
508extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
509 unsigned long iova, int flags);
510
511static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
512{
513 if (domain->ops->flush_iotlb_all)
514 domain->ops->flush_iotlb_all(domain);
515}
516
517static inline void iommu_iotlb_sync(struct iommu_domain *domain,
518 struct iommu_iotlb_gather *iotlb_gather)
519{
520 if (domain->ops->iotlb_sync)
521 domain->ops->iotlb_sync(domain, iotlb_gather);
522
523 iommu_iotlb_gather_init(iotlb_gather);
524}
525
526
527
528
529
530
531
532
533
534
535
536
537static inline
538bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
539 unsigned long iova, size_t size)
540{
541 unsigned long start = iova, end = start + size - 1;
542
543 return gather->end != 0 &&
544 (end + 1 < gather->start || start > gather->end + 1);
545}
546
547
548
549
550
551
552
553
554
555
556
557
558static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
559 unsigned long iova, size_t size)
560{
561 unsigned long end = iova + size - 1;
562
563 if (gather->start > iova)
564 gather->start = iova;
565 if (gather->end < end)
566 gather->end = end;
567}
568
569
570
571
572
573
574
575
576
577
578
579
580static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
581 struct iommu_iotlb_gather *gather,
582 unsigned long iova, size_t size)
583{
584
585
586
587
588
589 if ((gather->pgsize && gather->pgsize != size) ||
590 iommu_iotlb_gather_is_disjoint(gather, iova, size))
591 iommu_iotlb_sync(domain, gather);
592
593 gather->pgsize = size;
594 iommu_iotlb_gather_add_range(gather, iova, size);
595}
596
597static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
598{
599 return gather && gather->queued;
600}
601
602
603extern struct iommu_group *pci_device_group(struct device *dev);
604
605extern struct iommu_group *generic_device_group(struct device *dev);
606
607struct iommu_group *fsl_mc_device_group(struct device *dev);
608
609
610
611
612
613
614
615
616
617struct iommu_fwspec {
618 const struct iommu_ops *ops;
619 struct fwnode_handle *iommu_fwnode;
620 u32 flags;
621 unsigned int num_ids;
622 u32 ids[];
623};
624
625
626#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
627
628
629
630
/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: the device bound to an address space
 */
struct iommu_sva {
	struct device *dev;
};
634
635int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
636 const struct iommu_ops *ops);
637void iommu_fwspec_free(struct device *dev);
638int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
639const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
640
641static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
642{
643 if (dev->iommu)
644 return dev->iommu->fwspec;
645 else
646 return NULL;
647}
648
649static inline void dev_iommu_fwspec_set(struct device *dev,
650 struct iommu_fwspec *fwspec)
651{
652 dev->iommu->fwspec = fwspec;
653}
654
655static inline void *dev_iommu_priv_get(struct device *dev)
656{
657 if (dev->iommu)
658 return dev->iommu->priv;
659 else
660 return NULL;
661}
662
663static inline void dev_iommu_priv_set(struct device *dev, void *priv)
664{
665 dev->iommu->priv = priv;
666}
667
668int iommu_probe_device(struct device *dev);
669void iommu_release_device(struct device *dev);
670
671int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
672int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
673bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
674int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
675void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
676int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
677
678struct iommu_sva *iommu_sva_bind_device(struct device *dev,
679 struct mm_struct *mm,
680 void *drvdata);
681void iommu_sva_unbind_device(struct iommu_sva *handle);
682u32 iommu_sva_get_pasid(struct iommu_sva *handle);
683
684#else
685
686struct iommu_ops {};
687struct iommu_group {};
688struct iommu_fwspec {};
689struct iommu_device {};
690struct iommu_fault_param {};
691struct iommu_iotlb_gather {};
692
693static inline bool iommu_present(struct bus_type *bus)
694{
695 return false;
696}
697
698static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
699{
700 return false;
701}
702
703static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
704{
705 return NULL;
706}
707
708static inline struct iommu_group *iommu_group_get_by_id(int id)
709{
710 return NULL;
711}
712
713static inline void iommu_domain_free(struct iommu_domain *domain)
714{
715}
716
717static inline int iommu_attach_device(struct iommu_domain *domain,
718 struct device *dev)
719{
720 return -ENODEV;
721}
722
723static inline void iommu_detach_device(struct iommu_domain *domain,
724 struct device *dev)
725{
726}
727
728static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
729{
730 return NULL;
731}
732
733static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
734 phys_addr_t paddr, size_t size, int prot)
735{
736 return -ENODEV;
737}
738
739static inline int iommu_map_atomic(struct iommu_domain *domain,
740 unsigned long iova, phys_addr_t paddr,
741 size_t size, int prot)
742{
743 return -ENODEV;
744}
745
746static inline size_t iommu_unmap(struct iommu_domain *domain,
747 unsigned long iova, size_t size)
748{
749 return 0;
750}
751
752static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
753 unsigned long iova, int gfp_order,
754 struct iommu_iotlb_gather *iotlb_gather)
755{
756 return 0;
757}
758
759static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
760 unsigned long iova, struct scatterlist *sg,
761 unsigned int nents, int prot)
762{
763 return -ENODEV;
764}
765
766static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
767 unsigned long iova, struct scatterlist *sg,
768 unsigned int nents, int prot)
769{
770 return -ENODEV;
771}
772
773static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
774{
775}
776
777static inline void iommu_iotlb_sync(struct iommu_domain *domain,
778 struct iommu_iotlb_gather *iotlb_gather)
779{
780}
781
782static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
783{
784 return 0;
785}
786
787static inline void iommu_set_fault_handler(struct iommu_domain *domain,
788 iommu_fault_handler_t handler, void *token)
789{
790}
791
792static inline void iommu_get_resv_regions(struct device *dev,
793 struct list_head *list)
794{
795}
796
797static inline void iommu_put_resv_regions(struct device *dev,
798 struct list_head *list)
799{
800}
801
802static inline int iommu_get_group_resv_regions(struct iommu_group *group,
803 struct list_head *head)
804{
805 return -ENODEV;
806}
807
808static inline void iommu_set_default_passthrough(bool cmd_line)
809{
810}
811
812static inline void iommu_set_default_translated(bool cmd_line)
813{
814}
815
816static inline bool iommu_default_passthrough(void)
817{
818 return true;
819}
820
821static inline int iommu_attach_group(struct iommu_domain *domain,
822 struct iommu_group *group)
823{
824 return -ENODEV;
825}
826
827static inline void iommu_detach_group(struct iommu_domain *domain,
828 struct iommu_group *group)
829{
830}
831
832static inline struct iommu_group *iommu_group_alloc(void)
833{
834 return ERR_PTR(-ENODEV);
835}
836
837static inline void *iommu_group_get_iommudata(struct iommu_group *group)
838{
839 return NULL;
840}
841
842static inline void iommu_group_set_iommudata(struct iommu_group *group,
843 void *iommu_data,
844 void (*release)(void *iommu_data))
845{
846}
847
848static inline int iommu_group_set_name(struct iommu_group *group,
849 const char *name)
850{
851 return -ENODEV;
852}
853
854static inline int iommu_group_add_device(struct iommu_group *group,
855 struct device *dev)
856{
857 return -ENODEV;
858}
859
860static inline void iommu_group_remove_device(struct device *dev)
861{
862}
863
864static inline int iommu_group_for_each_dev(struct iommu_group *group,
865 void *data,
866 int (*fn)(struct device *, void *))
867{
868 return -ENODEV;
869}
870
871static inline struct iommu_group *iommu_group_get(struct device *dev)
872{
873 return NULL;
874}
875
876static inline void iommu_group_put(struct iommu_group *group)
877{
878}
879
880static inline int iommu_group_register_notifier(struct iommu_group *group,
881 struct notifier_block *nb)
882{
883 return -ENODEV;
884}
885
886static inline int iommu_group_unregister_notifier(struct iommu_group *group,
887 struct notifier_block *nb)
888{
889 return 0;
890}
891
892static inline
893int iommu_register_device_fault_handler(struct device *dev,
894 iommu_dev_fault_handler_t handler,
895 void *data)
896{
897 return -ENODEV;
898}
899
900static inline int iommu_unregister_device_fault_handler(struct device *dev)
901{
902 return 0;
903}
904
905static inline
906int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
907{
908 return -ENODEV;
909}
910
911static inline int iommu_page_response(struct device *dev,
912 struct iommu_page_response *msg)
913{
914 return -ENODEV;
915}
916
917static inline int iommu_group_id(struct iommu_group *group)
918{
919 return -ENODEV;
920}
921
922static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
923 unsigned long quirks)
924{
925 return 0;
926}
927
928static inline int iommu_device_register(struct iommu_device *iommu,
929 const struct iommu_ops *ops,
930 struct device *hwdev)
931{
932 return -ENODEV;
933}
934
935static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
936{
937 return NULL;
938}
939
940static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
941{
942}
943
944static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
945 struct iommu_iotlb_gather *gather,
946 unsigned long iova, size_t size)
947{
948}
949
950static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
951{
952 return false;
953}
954
955static inline void iommu_device_unregister(struct iommu_device *iommu)
956{
957}
958
959static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
960 struct device *parent,
961 const struct attribute_group **groups,
962 const char *fmt, ...)
963{
964 return -ENODEV;
965}
966
967static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
968{
969}
970
971static inline int iommu_device_link(struct device *dev, struct device *link)
972{
973 return -EINVAL;
974}
975
976static inline void iommu_device_unlink(struct device *dev, struct device *link)
977{
978}
979
980static inline int iommu_fwspec_init(struct device *dev,
981 struct fwnode_handle *iommu_fwnode,
982 const struct iommu_ops *ops)
983{
984 return -ENODEV;
985}
986
987static inline void iommu_fwspec_free(struct device *dev)
988{
989}
990
991static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
992 int num_ids)
993{
994 return -ENODEV;
995}
996
997static inline
998const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
999{
1000 return NULL;
1001}
1002
1003static inline bool
1004iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
1005{
1006 return false;
1007}
1008
1009static inline int
1010iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
1011{
1012 return -ENODEV;
1013}
1014
1015static inline int
1016iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
1017{
1018 return -ENODEV;
1019}
1020
1021static inline int
1022iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
1023{
1024 return -ENODEV;
1025}
1026
1027static inline void
1028iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
1029{
1030}
1031
1032static inline int
1033iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
1034{
1035 return -ENODEV;
1036}
1037
1038static inline struct iommu_sva *
1039iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
1040{
1041 return NULL;
1042}
1043
1044static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
1045{
1046}
1047
1048static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
1049{
1050 return IOMMU_PASID_INVALID;
1051}
1052
1053static inline int
1054iommu_uapi_cache_invalidate(struct iommu_domain *domain,
1055 struct device *dev,
1056 struct iommu_cache_invalidate_info *inv_info)
1057{
1058 return -ENODEV;
1059}
1060
1061static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
1062 struct device *dev, void __user *udata)
1063{
1064 return -ENODEV;
1065}
1066
1067static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
1068 struct device *dev, void __user *udata)
1069{
1070 return -ENODEV;
1071}
1072
1073static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
1074 struct device *dev,
1075 ioasid_t pasid)
1076{
1077 return -ENODEV;
1078}
1079
1080static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
1081{
1082 return NULL;
1083}
1084#endif
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
1097 unsigned long iova, struct sg_table *sgt, int prot)
1098{
1099 return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
1100}
1101
/* IOMMU debugfs support: real implementation only with CONFIG_IOMMU_DEBUGFS. */
#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif
1108
1109#endif
1110