/* linux/include/linux/iova.h */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
   8
   9#ifndef _IOVA_H_
  10#define _IOVA_H_
  11
  12#include <linux/types.h>
  13#include <linux/kernel.h>
  14#include <linux/rbtree.h>
  15#include <linux/atomic.h>
  16#include <linux/dma-mapping.h>
  17
  18/* iova structure */
  19struct iova {
  20        struct rb_node  node;
  21        unsigned long   pfn_hi; /* Highest allocated pfn */
  22        unsigned long   pfn_lo; /* Lowest allocated pfn */
  23};
  24
  25struct iova_magazine;
  26struct iova_cpu_rcache;
  27
  28#define IOVA_RANGE_CACHE_MAX_SIZE 6     /* log of max cached IOVA range size (in pages) */
  29#define MAX_GLOBAL_MAGS 32      /* magazines per bin */
  30
  31struct iova_rcache {
  32        spinlock_t lock;
  33        unsigned long depot_size;
  34        struct iova_magazine *depot[MAX_GLOBAL_MAGS];
  35        struct iova_cpu_rcache __percpu *cpu_rcaches;
  36};
  37
  38struct iova_domain;
  39
  40/* Call-Back from IOVA code into IOMMU drivers */
  41typedef void (* iova_flush_cb)(struct iova_domain *domain);
  42
  43/* Destructor for per-entry data */
  44typedef void (* iova_entry_dtor)(unsigned long data);
  45
  46/* Number of entries per Flush Queue */
  47#define IOVA_FQ_SIZE    256
  48
  49/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
  50#define IOVA_FQ_TIMEOUT 10
  51
  52/* Flush Queue entry for defered flushing */
  53struct iova_fq_entry {
  54        unsigned long iova_pfn;
  55        unsigned long pages;
  56        unsigned long data;
  57        u64 counter; /* Flush counter when this entrie was added */
  58};
  59
  60/* Per-CPU Flush Queue structure */
  61struct iova_fq {
  62        struct iova_fq_entry entries[IOVA_FQ_SIZE];
  63        unsigned head, tail;
  64        spinlock_t lock;
  65};
  66
  67/* holds all the iova translations for a domain */
  68struct iova_domain {
  69        spinlock_t      iova_rbtree_lock; /* Lock to protect update of rbtree */
  70        struct rb_root  rbroot;         /* iova domain rbtree root */
  71        struct rb_node  *cached_node;   /* Save last alloced node */
  72        struct rb_node  *cached32_node; /* Save last 32-bit alloced node */
  73        unsigned long   granule;        /* pfn granularity for this domain */
  74        unsigned long   start_pfn;      /* Lower limit for this domain */
  75        unsigned long   dma_32bit_pfn;
  76        unsigned long   max32_alloc_size; /* Size of last failed allocation */
  77        struct iova_fq __percpu *fq;    /* Flush Queue */
  78
  79        atomic64_t      fq_flush_start_cnt;     /* Number of TLB flushes that
  80                                                   have been started */
  81
  82        atomic64_t      fq_flush_finish_cnt;    /* Number of TLB flushes that
  83                                                   have been finished */
  84
  85        struct iova     anchor;         /* rbtree lookup anchor */
  86        struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];  /* IOVA range caches */
  87
  88        iova_flush_cb   flush_cb;       /* Call-Back function to flush IOMMU
  89                                           TLBs */
  90
  91        iova_entry_dtor entry_dtor;     /* IOMMU driver specific destructor for
  92                                           iova entry */
  93
  94        struct timer_list fq_timer;             /* Timer to regularily empty the
  95                                                   flush-queues */
  96        atomic_t fq_timer_on;                   /* 1 when timer is active, 0
  97                                                   when not */
  98        struct hlist_node       cpuhp_dead;
  99};
 100
 101static inline unsigned long iova_size(struct iova *iova)
 102{
 103        return iova->pfn_hi - iova->pfn_lo + 1;
 104}
 105
 106static inline unsigned long iova_shift(struct iova_domain *iovad)
 107{
 108        return __ffs(iovad->granule);
 109}
 110
 111static inline unsigned long iova_mask(struct iova_domain *iovad)
 112{
 113        return iovad->granule - 1;
 114}
 115
 116static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
 117{
 118        return iova & iova_mask(iovad);
 119}
 120
 121static inline size_t iova_align(struct iova_domain *iovad, size_t size)
 122{
 123        return ALIGN(size, iovad->granule);
 124}
 125
 126static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
 127{
 128        return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
 129}
 130
 131static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
 132{
 133        return iova >> iova_shift(iovad);
 134}
 135
 136#if IS_ENABLED(CONFIG_IOMMU_IOVA)
 137int iova_cache_get(void);
 138void iova_cache_put(void);
 139
 140void free_iova(struct iova_domain *iovad, unsigned long pfn);
 141void __free_iova(struct iova_domain *iovad, struct iova *iova);
 142struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 143        unsigned long limit_pfn,
 144        bool size_aligned);
 145void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
 146                    unsigned long size);
 147void queue_iova(struct iova_domain *iovad,
 148                unsigned long pfn, unsigned long pages,
 149                unsigned long data);
 150unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
 151                              unsigned long limit_pfn, bool flush_rcache);
 152struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 153        unsigned long pfn_hi);
 154void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 155        unsigned long start_pfn);
 156int init_iova_flush_queue(struct iova_domain *iovad,
 157                          iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 158struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 159void put_iova_domain(struct iova_domain *iovad);
 160#else
 161static inline int iova_cache_get(void)
 162{
 163        return -ENOTSUPP;
 164}
 165
 166static inline void iova_cache_put(void)
 167{
 168}
 169
 170static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
 171{
 172}
 173
 174static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
 175{
 176}
 177
 178static inline struct iova *alloc_iova(struct iova_domain *iovad,
 179                                      unsigned long size,
 180                                      unsigned long limit_pfn,
 181                                      bool size_aligned)
 182{
 183        return NULL;
 184}
 185
 186static inline void free_iova_fast(struct iova_domain *iovad,
 187                                  unsigned long pfn,
 188                                  unsigned long size)
 189{
 190}
 191
 192static inline void queue_iova(struct iova_domain *iovad,
 193                              unsigned long pfn, unsigned long pages,
 194                              unsigned long data)
 195{
 196}
 197
 198static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
 199                                            unsigned long size,
 200                                            unsigned long limit_pfn,
 201                                            bool flush_rcache)
 202{
 203        return 0;
 204}
 205
 206static inline struct iova *reserve_iova(struct iova_domain *iovad,
 207                                        unsigned long pfn_lo,
 208                                        unsigned long pfn_hi)
 209{
 210        return NULL;
 211}
 212
 213static inline void init_iova_domain(struct iova_domain *iovad,
 214                                    unsigned long granule,
 215                                    unsigned long start_pfn)
 216{
 217}
 218
 219static inline int init_iova_flush_queue(struct iova_domain *iovad,
 220                                        iova_flush_cb flush_cb,
 221                                        iova_entry_dtor entry_dtor)
 222{
 223        return -ENODEV;
 224}
 225
 226static inline struct iova *find_iova(struct iova_domain *iovad,
 227                                     unsigned long pfn)
 228{
 229        return NULL;
 230}
 231
 232static inline void put_iova_domain(struct iova_domain *iovad)
 233{
 234}
 235
 236#endif
 237
 238#endif
 239