linux/include/linux/io-pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
        ARM_32_LPAE_S1,
        ARM_32_LPAE_S2,
        ARM_64_LPAE_S1,
        ARM_64_LPAE_S2,
        ARM_V7S,
        ARM_MALI_LPAE,
        IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
        void (*tlb_flush_all)(void *cookie);
        void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
                              bool leaf, void *cookie);
        void (*tlb_sync)(void *cookie);
};

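/*
 * Example (illustrative only, not part of this header): a minimal sketch of
 * how an IOMMU driver might provide its iommu_gather_ops. The names
 * my_domain, my_iommu_inv_all(), my_iommu_inv_range() and my_iommu_sync()
 * are hypothetical driver-side helpers that would poke the hardware's
 * invalidation registers; the cookie is whatever the driver later passes to
 * alloc_io_pgtable_ops().
 *
 *      static void my_tlb_flush_all(void *cookie)
 *      {
 *              struct my_domain *dom = cookie;
 *
 *              my_iommu_inv_all(dom);
 *      }
 *
 *      static void my_tlb_add_flush(unsigned long iova, size_t size,
 *                                   size_t granule, bool leaf, void *cookie)
 *      {
 *              struct my_domain *dom = cookie;
 *
 *              my_iommu_inv_range(dom, iova, size, granule, leaf);
 *      }
 *
 *      static void my_tlb_sync(void *cookie)
 *      {
 *              struct my_domain *dom = cookie;
 *
 *              my_iommu_sync(dom);
 *      }
 *
 *      static const struct iommu_gather_ops my_gather_ops = {
 *              .tlb_flush_all  = my_tlb_flush_all,
 *              .tlb_add_flush  = my_tlb_add_flush,
 *              .tlb_sync       = my_tlb_sync,
 *      };
 */
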
/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
        /*
         * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
         *      stage 1 PTEs, for hardware which insists on validating them
         *      even in non-secure state where they should normally be ignored.
         *
         * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
         *      IOMMU_NOEXEC flags and map everything with full access, for
         *      hardware which does not implement the permissions of a given
         *      format, and/or requires some format-specific default value.
         *
         * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
         *      (unmapped) entries but the hardware might do so anyway, perform
         *      TLB maintenance when mapping as well as when unmapping.
         *
         * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
         *      PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
         *      when the SoC is in "4GB mode" and they can only access the high
         *      remap of DRAM (0x1_00000000 to 0x1_ffffffff).
         *
         * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
         *      be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
         *      software-emulated IOMMU), such that pagetable updates need not
         *      be treated as explicit DMA data.
         *
         * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
         *      on unmap, for DMA domains using the flush queue mechanism for
         *      delayed invalidation.
         */
        #define IO_PGTABLE_QUIRK_ARM_NS         BIT(0)
        #define IO_PGTABLE_QUIRK_NO_PERMS       BIT(1)
        #define IO_PGTABLE_QUIRK_TLBI_ON_MAP    BIT(2)
        #define IO_PGTABLE_QUIRK_ARM_MTK_4GB    BIT(3)
        #define IO_PGTABLE_QUIRK_NO_DMA         BIT(4)
        #define IO_PGTABLE_QUIRK_NON_STRICT     BIT(5)
        unsigned long                   quirks;
        unsigned long                   pgsize_bitmap;
        unsigned int                    ias;
        unsigned int                    oas;
        const struct iommu_gather_ops   *tlb;
        struct device                   *iommu_dev;

        /* Low-level data specific to the table format */
        union {
                struct {
                        u64     ttbr[2];
                        u64     tcr;
                        u64     mair[2];
                } arm_lpae_s1_cfg;

                struct {
                        u64     vttbr;
                        u64     vtcr;
                } arm_lpae_s2_cfg;

                struct {
                        u32     ttbr[2];
                        u32     tcr;
                        u32     nmrr;
                        u32     prrr;
                } arm_v7s_cfg;

                struct {
                        u64     transtab;
                        u64     memattr;
                } arm_mali_lpae_cfg;
        };
};

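/*
 * Example (illustrative only): a minimal sketch of how an IOMMU driver
 * might fill in an io_pgtable_cfg before asking for a set of tables.
 * "my_gather_ops" and "smmu" are hypothetical driver-side names, and the
 * page sizes, address sizes and quirks shown here are arbitrary; a real
 * driver derives them from probing the hardware.
 *
 *      struct io_pgtable_cfg cfg = {
 *              .quirks         = IO_PGTABLE_QUIRK_NO_DMA,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
 *              .ias            = 48,
 *              .oas            = 48,
 *              .tlb            = &my_gather_ops,
 *              .iommu_dev      = smmu->dev,
 *      };
 */
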
/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
        int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
                   phys_addr_t paddr, size_t size, int prot);
        size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
                        size_t size);
        phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
                                    unsigned long iova);
};

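/*
 * Example (illustrative only): because these callbacks mirror the iommu_ops
 * member functions, a driver's domain callbacks usually just forward to
 * them. "my_domain" and "to_my_domain()" are hypothetical driver-side
 * names; dom->pgtbl_ops is assumed to hold the pointer returned by
 * alloc_io_pgtable_ops().
 *
 *      static int my_iommu_map(struct iommu_domain *domain, unsigned long iova,
 *                              phys_addr_t paddr, size_t size, int prot)
 *      {
 *              struct my_domain *dom = to_my_domain(domain);
 *
 *              return dom->pgtbl_ops->map(dom->pgtbl_ops, iova, paddr,
 *                                         size, prot);
 *      }
 *
 *      static size_t my_iommu_unmap(struct iommu_domain *domain,
 *                                   unsigned long iova, size_t size)
 *      {
 *              struct my_domain *dom = to_my_domain(domain);
 *
 *              return dom->pgtbl_ops->unmap(dom->pgtbl_ops, iova, size);
 *      }
 */
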
/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
                                            struct io_pgtable_cfg *cfg,
                                            void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);


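/*
 * Example (illustrative only): a minimal sketch of the allocation lifecycle,
 * reusing the hypothetical "cfg", "my_gather_ops" and "dom" objects from the
 * sketches above. The cookie passed here ("dom") is what the TLB callbacks
 * later receive.
 *
 *      dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
 *      if (!dom->pgtbl_ops)
 *              return -ENOMEM;
 *
 *      (cfg.pgsize_bitmap may have been restricted by the allocator, so
 *       read it back and propagate it to the core IOMMU domain here)
 *
 *      (map/unmap via dom->pgtbl_ops while the domain is live)
 *
 *      (on teardown, once no device can issue further table walks:)
 *      free_io_pgtable_ops(dom->pgtbl_ops);
 */
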
/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
        enum io_pgtable_fmt     fmt;
        void                    *cookie;
        struct io_pgtable_cfg   cfg;
        struct io_pgtable_ops   ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
        iop->cfg.tlb->tlb_flush_all(iop->cookie);
}

static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
                unsigned long iova, size_t size, size_t granule, bool leaf)
{
        iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
}

static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
        iop->cfg.tlb->tlb_sync(iop->cookie);
}

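/*
 * Example (illustrative only): inside a format implementation, the
 * io_pgtable_ops handed back to the driver is embedded in the allocator's
 * struct io_pgtable, so callbacks recover it with
 * io_pgtable_ops_to_pgtable() and use the helpers above for TLB
 * maintenance. "my_fmt_unmap" and the PTE handling are a hypothetical
 * sketch, not code from any in-tree allocator.
 *
 *      static size_t my_fmt_unmap(struct io_pgtable_ops *ops,
 *                                 unsigned long iova, size_t size)
 *      {
 *              struct io_pgtable *iop = io_pgtable_ops_to_pgtable(ops);
 *
 *              (clear the relevant PTE(s) covering iova here)
 *
 *              io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 *              io_pgtable_tlb_sync(iop);
 *              return size;
 *      }
 */
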
/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
        struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
        void (*free)(struct io_pgtable *iop);
};

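/*
 * Example (illustrative only): a format implementation exposes one of the
 * init_fns instances declared below so the core allocator can dispatch on
 * the io_pgtable_fmt chosen by the driver. The names here are hypothetical
 * placeholders for a format's real alloc/free routines.
 *
 *      struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
 *              .alloc  = my_fmt_alloc_pgtable,
 *              .free   = my_fmt_free_pgtable,
 *      };
 */
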
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;

#endif /* __IO_PGTABLE_H */