linux/include/linux/iommu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ      (1 << 0)
#define IOMMU_WRITE     (1 << 1)
#define IOMMU_CACHE     (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC    (1 << 3)
#define IOMMU_MMIO      (1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV      (1 << 5)

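/*
 * Illustrative sketch (not part of the kernel API shown here): combining
 * the prot flags above for a cacheable, read/write mapping. The domain,
 * iova and paddr values are assumptions for the example.
 *
 *        int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *
 *        ret = iommu_map(domain, iova, paddr, SZ_4K, prot);
 */
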
struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ        0x0
#define IOMMU_FAULT_WRITE       0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
                        struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
        dma_addr_t aperture_start; /* First address that can be mapped    */
        dma_addr_t aperture_end;   /* Last address that can be mapped     */
        bool force_aperture;       /* DMA only allowed in mappable range? */
};

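/*
 * Illustrative sketch: how a caller might honour the geometry above before
 * mapping; "iova" and "size" are assumptions for the example.
 *
 *        const struct iommu_domain_geometry *geo = &domain->geometry;
 *
 *        if (geo->force_aperture &&
 *            (iova < geo->aperture_start ||
 *             iova + size - 1 > geo->aperture_end))
 *                return -EINVAL;
 */
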
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING   (1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API  (1U << 1)  /* Domain for use in DMA-API
                                              implementation              */
#define __IOMMU_DOMAIN_PT       (1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ   (1U << 3)  /* DMA-API uses flush queue    */

/*
 * These are the possible domain types:
 *
 *      IOMMU_DOMAIN_BLOCKED    - All DMA is blocked, can be used to isolate
 *                                devices
 *      IOMMU_DOMAIN_IDENTITY   - DMA addresses are system physical addresses
 *      IOMMU_DOMAIN_UNMANAGED  - DMA mappings managed by IOMMU-API user, used
 *                                for VMs
 *      IOMMU_DOMAIN_DMA        - Internally used for DMA-API implementations.
 *                                This flag allows IOMMU drivers to implement
 *                                certain optimizations for these domains
 *      IOMMU_DOMAIN_DMA_FQ     - As above, but definitely using batched TLB
 *                                invalidation.
 */
#define IOMMU_DOMAIN_BLOCKED    (0U)
#define IOMMU_DOMAIN_IDENTITY   (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED  (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING |        \
                                 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ     (__IOMMU_DOMAIN_PAGING |        \
                                 __IOMMU_DOMAIN_DMA_API |       \
                                 __IOMMU_DOMAIN_DMA_FQ)

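/*
 * Illustrative sketch of the unmanaged-domain lifecycle built from the
 * types above (the allocated domain is IOMMU_DOMAIN_UNMANAGED):
 *
 *        struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *        if (!domain)
 *                return -ENOMEM;
 *        ret = iommu_attach_device(domain, dev);
 *        if (!ret) {
 *                (use the domain via iommu_map()/iommu_unmap())
 *                iommu_detach_device(domain, dev);
 *        }
 *        iommu_domain_free(domain);
 */
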
struct iommu_domain {
        unsigned type;
        const struct iommu_ops *ops;
        unsigned long pgsize_bitmap;    /* Bitmap of page sizes in use */
        iommu_fault_handler_t handler;
        void *handler_token;
        struct iommu_domain_geometry geometry;
        struct iommu_dma_cookie *iova_cookie;
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
        return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
        IOMMU_CAP_CACHE_COHERENCY,      /* IOMMU can enforce cache coherent DMA
                                           transactions */
        IOMMU_CAP_INTR_REMAP,           /* IOMMU supports interrupt isolation */
        IOMMU_CAP_NOEXEC,               /* IOMMU_NOEXEC flag */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
        /* Memory regions which must be mapped 1:1 at all times */
        IOMMU_RESV_DIRECT,
        /*
         * Memory regions which are advertised to be 1:1 but are
         * commonly considered relaxable in some conditions,
         * for instance in device assignment use case (USB, Graphics)
         */
        IOMMU_RESV_DIRECT_RELAXABLE,
        /* Arbitrary "never map this or give it to a device" address ranges */
        IOMMU_RESV_RESERVED,
        /* Hardware MSI region (untranslated) */
        IOMMU_RESV_MSI,
        /* Software-managed MSI translation window */
        IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
        struct list_head        list;
        phys_addr_t             start;
        size_t                  length;
        int                     prot;
        enum iommu_resv_type    type;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *                       enabling %IOMMU_DEV_FEAT_SVA requires
 *                       %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *                       Faults themselves instead of relying on the IOMMU. When
 *                       supported, this feature must be enabled before and
 *                       disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature(), which
 * fails if the feature is not supported.
 */
enum iommu_dev_features {
        IOMMU_DEV_FEAT_AUX,
        IOMMU_DEV_FEAT_SVA,
        IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID     (-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
        unsigned long           start;
        unsigned long           end;
        size_t                  pgsize;
        struct page             *freelist;
        bool                    queued;
};

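/*
 * Illustrative sketch of the gather lifecycle on the caller side; the
 * iovas[] array and its length "n" are assumptions for the example.
 *
 *        struct iommu_iotlb_gather gather;
 *        int i;
 *
 *        iommu_iotlb_gather_init(&gather);
 *        for (i = 0; i < n; i++)
 *                iommu_unmap_fast(domain, iovas[i], SZ_4K, &gather);
 *        iommu_iotlb_sync(domain, &gather);
 */
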
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *              - IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *              - IOMMU_DOMAIN_DMA: must use a dma domain
 *              - 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
        bool (*capable)(enum iommu_cap);

        /* Domain allocation and freeing by the iommu driver */
        struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
        void (*domain_free)(struct iommu_domain *);

        int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
        void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
        int (*map)(struct iommu_domain *domain, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
        int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t pgsize, size_t pgcount,
                         int prot, gfp_t gfp, size_t *mapped);
        size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
                     size_t size, struct iommu_iotlb_gather *iotlb_gather);
        size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
                              size_t pgsize, size_t pgcount,
                              struct iommu_iotlb_gather *iotlb_gather);
        void (*flush_iotlb_all)(struct iommu_domain *domain);
        void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
                               size_t size);
        void (*iotlb_sync)(struct iommu_domain *domain,
                           struct iommu_iotlb_gather *iotlb_gather);
        phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
        struct iommu_device *(*probe_device)(struct device *dev);
        void (*release_device)(struct device *dev);
        void (*probe_finalize)(struct device *dev);
        struct iommu_group *(*device_group)(struct device *dev);
        int (*enable_nesting)(struct iommu_domain *domain);
        int (*set_pgtable_quirks)(struct iommu_domain *domain,
                                  unsigned long quirks);

        /* Request/Free a list of reserved regions for a device */
        void (*get_resv_regions)(struct device *dev, struct list_head *list);
        void (*put_resv_regions)(struct device *dev, struct list_head *list);
        void (*apply_resv_region)(struct device *dev,
                                  struct iommu_domain *domain,
                                  struct iommu_resv_region *region);

        int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
        bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

        /* Per device IOMMU features */
        bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
        bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
        int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
        int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

        /* Aux-domain specific attach/detach entries */
        int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
        void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
        int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

        struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
                                      void *drvdata);
        void (*sva_unbind)(struct iommu_sva *handle);
        u32 (*sva_get_pasid)(struct iommu_sva *handle);

        int (*page_response)(struct device *dev,
                             struct iommu_fault_event *evt,
                             struct iommu_page_response *msg);
        int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
                                struct iommu_cache_invalidate_info *inv_info);
        int (*sva_bind_gpasid)(struct iommu_domain *domain,
                        struct device *dev, struct iommu_gpasid_bind_data *data);

        int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);

        int (*def_domain_type)(struct device *dev);

        unsigned long pgsize_bitmap;
        struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *                       instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
        struct list_head list;
        const struct iommu_ops *ops;
        struct fwnode_handle *fwnode;
        struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
        struct iommu_fault fault;
        struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
        iommu_dev_fault_handler_t handler;
        void *data;
        struct list_head faults;
        struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:  I/O Page Fault queue and data
 * @fwspec:      IOMMU fwspec data
 * @iommu_dev:   IOMMU device this device is linked to
 * @priv:        IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *      struct iommu_group      *iommu_group;
 */
struct dev_iommu {
        struct mutex lock;
        struct iommu_fault_param        *fault_param;
        struct iopf_device_param        *iopf_param;
        struct iommu_fwspec             *fwspec;
        struct iommu_device             *iommu_dev;
        void                            *priv;
};

int iommu_device_register(struct iommu_device *iommu,
                          const struct iommu_ops *ops,
                          struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
                            struct device *parent,
                            const struct attribute_group **groups,
                            const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

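/*
 * Illustrative sketch: registering an IOMMU instance from a driver probe
 * path. "smmu" and "my_iommu_ops" are assumptions for the example.
 *
 *        ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "my-iommu");
 *        if (ret)
 *                return ret;
 *        ret = iommu_device_register(&smmu->iommu, &my_iommu_ops, dev);
 *        if (ret)
 *                iommu_device_sysfs_remove(&smmu->iommu);
 */
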
static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
        return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
        *gather = (struct iommu_iotlb_gather) {
                .start  = ULONG_MAX,
        };
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE           1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE           2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER          3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER         4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER        5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER       6 /* Post Driver unbind */

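/*
 * Illustrative sketch: a consumer watching a group for the actions above;
 * "my_nb" and my_notify() are assumptions for the example.
 *
 *        static int my_notify(struct notifier_block *nb, unsigned long action,
 *                             void *data)
 *        {
 *                struct device *dev = data;
 *
 *                if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *                        dev_info(dev, "driver bound\n");
 *                return NOTIFY_OK;
 *        }
 *
 *        iommu_group_register_notifier(group, &my_nb);
 */
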
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
                               struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
                                struct device *dev);
extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
                                       struct device *dev,
                                       void __user *uinfo);

extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
                                      struct device *dev, void __user *udata);
extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
                                        struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
                                   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                          size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
                               unsigned long iova, size_t size,
                               struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                struct scatterlist *sg, unsigned int nents, int prot);
extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
                                   unsigned long iova, struct scatterlist *sg,
                                   unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
                        iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
                                           struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
                        enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
                                        struct list_head *head);

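/*
 * Illustrative sketch: retrieving and walking a device's reserved regions.
 *
 *        struct iommu_resv_region *region;
 *        LIST_HEAD(resv_regions);
 *
 *        iommu_get_resv_regions(dev, &resv_regions);
 *        list_for_each_entry(region, &resv_regions, list)
 *                dev_info(dev, "resv %pa/%zu\n", &region->start,
 *                         region->length);
 *        iommu_put_resv_regions(dev, &resv_regions);
 */
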
extern int iommu_attach_group(struct iommu_domain *domain,
                              struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
                               struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
                                      void *iommu_data,
                                      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
                                  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
                                         struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
                                           struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
                                     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
                               struct iommu_page_response *msg);

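/*
 * Illustrative sketch: registering a device fault handler and completing a
 * recoverable fault; my_handler() and the field choices are assumptions
 * based on the uapi definitions in <uapi/linux/iommu.h>.
 *
 *        static int my_handler(struct iommu_fault *fault, void *data)
 *        {
 *                struct iommu_page_response resp = {
 *                        .version = IOMMU_PAGE_RESP_VERSION_1,
 *                        .grpid   = fault->prm.grpid,
 *                        .pasid   = fault->prm.pasid,
 *                        .code    = IOMMU_PAGE_RESP_SUCCESS,
 *                };
 *
 *                return iommu_page_response(data, &resp);
 *        }
 *
 *        iommu_register_device_fault_handler(dev, my_handler, dev);
 */
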
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
                unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                              unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        if (domain->ops->flush_iotlb_all)
                domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
{
        if (domain->ops->iotlb_sync)
                domain->ops->iotlb_sync(domain, iotlb_gather);

        iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
                                    unsigned long iova, size_t size)
{
        unsigned long start = iova, end = start + size - 1;

        return gather->end != 0 &&
                (end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
                                                unsigned long iova, size_t size)
{
        unsigned long end = iova + size - 1;

        if (gather->start > iova)
                gather->start = iova;
        if (gather->end < end)
                gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
                                               struct iommu_iotlb_gather *gather,
                                               unsigned long iova, size_t size)
{
        /*
         * If the new page is disjoint from the current range or is mapped at
         * a different granularity, then sync the TLB so that the gather
         * structure can be rewritten.
         */
        if ((gather->pgsize && gather->pgsize != size) ||
            iommu_iotlb_gather_is_disjoint(gather, iova, size))
                iommu_iotlb_sync(domain, gather);

        gather->pgsize = size;
        iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
        return gather && gather->queued;
}

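/*
 * Illustrative sketch of driver-side use of the gather helpers, from a
 * hypothetical ->unmap() implementation; my_pgtable_unmap() is an
 * assumption for the example.
 *
 *        static size_t my_unmap(struct iommu_domain *domain,
 *                               unsigned long iova, size_t size,
 *                               struct iommu_iotlb_gather *gather)
 *        {
 *                size_t unmapped = my_pgtable_unmap(domain, iova, size);
 *
 *                if (unmapped && !iommu_iotlb_gather_queued(gather))
 *                        iommu_iotlb_gather_add_page(domain, gather, iova,
 *                                                    unmapped);
 *                return unmapped;
 *        }
 */
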
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
        const struct iommu_ops  *ops;
        struct fwnode_handle    *iommu_fwnode;
        u32                     flags;
        unsigned int            num_ids;
        u32                     ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS                 (1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device bound to the mm
 */
struct iommu_sva {
        struct device                   *dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

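/*
 * Illustrative sketch: an ->of_xlate() implementation recording the master
 * ID parsed from the firmware "iommus" property; my_of_xlate() is an
 * assumption for the example.
 *
 *        static int my_of_xlate(struct device *dev,
 *                               struct of_phandle_args *args)
 *        {
 *                return iommu_fwspec_add_ids(dev, args->args, 1);
 *        }
 */
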
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
        if (dev->iommu)
                return dev->iommu->fwspec;
        else
                return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
                                        struct iommu_fwspec *fwspec)
{
        dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
        if (dev->iommu)
                return dev->iommu->priv;
        else
                return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
        dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
                                        struct mm_struct *mm,
                                        void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);

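/*
 * Illustrative sketch of the SVA bind/unbind flow for a device driver;
 * my_dev_set_pasid() is an assumption for the example.
 *
 *        struct iommu_sva *handle;
 *        u32 pasid;
 *
 *        handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *        if (IS_ERR(handle))
 *                return PTR_ERR(handle);
 *        pasid = iommu_sva_get_pasid(handle);
 *        if (pasid != IOMMU_PASID_INVALID)
 *                my_dev_set_pasid(dev, pasid);
 *        iommu_sva_unbind_device(handle);
 */
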
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
        return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
        return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
                                       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot)
{
        return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
                                   unsigned long iova, phys_addr_t paddr,
                                   size_t size, int prot)
{
        return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
                                      unsigned long iova, size_t size,
                                      struct iommu_iotlb_gather *iotlb_gather)
{
        return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
                                   unsigned long iova, struct scatterlist *sg,
                                   unsigned int nents, int prot)
{
        return -ENODEV;
}

static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
                                  unsigned long iova, struct scatterlist *sg,
                                  unsigned int nents, int prot)
{
        return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
                                  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
                                iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
                                        struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
                                        struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
                                               struct list_head *head)
{
        return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
        return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
                                     struct iommu_group *group)
{
        return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
                                      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
        return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
                                             void *iommu_data,
                                             void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
                                       const char *name)
{
        return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
                                         struct device *dev)
{
        return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
                                           void *data,
                                           int (*fn)(struct device *, void *))
{
        return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
        return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
                                                struct notifier_block *nb)
{
        return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
                                                  struct notifier_block *nb)
{
        return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
        return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
                                      struct iommu_page_response *msg)
{
        return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
        return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
                unsigned long quirks)
{
        return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
                                        const struct iommu_ops *ops,
                                        struct device *hwdev)
{
        return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
        return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
                                               struct iommu_iotlb_gather *gather,
                                               unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
        return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
                                          struct device *parent,
                                          const struct attribute_group **groups,
                                          const char *fmt, ...)
{
        return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
                                    struct device *link)
{
        return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
                                       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
                                    struct fwnode_handle *iommu_fwnode,
                                    const struct iommu_ops *ops)
{
        return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
                                       int num_ids)
{
        return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
        return NULL;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
        return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
        return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
        return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
        return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
        return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
        return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
        return IOMMU_PASID_INVALID;
}

static inline int
iommu_uapi_cache_invalidate(struct iommu_domain *domain,
                            struct device *dev,
                            void __user *uinfo)
{
        return -ENODEV;
}

static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
                                             struct device *dev, void __user *udata)
{
        return -ENODEV;
}

static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
                                               struct device *dev, void __user *udata)
{
        return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
                                          struct device *dev,
                                          ioasid_t pasid)
{
        return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
        return NULL;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:     The IOMMU domain to perform the mapping
 * @iova:       The start address to map the buffer
 * @sgt:        The sg_table object describing the buffer
 * @prot:       IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: the number of bytes mapped, or a negative errno on failure.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
                        unsigned long iova, struct sg_table *sgt, int prot)
{
        return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

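/*
 * Illustrative sketch: "sgt" is assumed to come from elsewhere (e.g. a
 * dma-buf exporter); the prot value grants read/write DMA access.
 *
 *        ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *                                           IOMMU_READ | IOMMU_WRITE);
 *
 *        if (mapped < 0)
 *                return mapped;
 */
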
#ifdef CONFIG_IOMMU_DEBUGFS
extern  struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */