linux/lib/iommu-common.c
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */

#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/iommu-helper.h>
#include <linux/iommu-common.h>
#include <linux/dma-mapping.h>
#include <linux/hash.h>

#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif

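/*
 * Allocations of more than this many pages are served from the large
 * pool, when one was configured at init time.
 */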
static unsigned long iommu_large_alloc = 15;

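/* Per-cpu hash used to spread CPUs across the pools; set up once at init. */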
static DEFINE_PER_CPU(unsigned int, iommu_hash_common);

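/*
 * Lazy flush bookkeeping: set_flush() marks the table as needing a
 * flush, which iommu_tbl_range_alloc() performs (via ->lazy_flush)
 * before handing out the next successful allocation.
 */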
static inline bool need_flush(struct iommu_map_table *iommu)
{
        return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
}

static inline void set_flush(struct iommu_map_table *iommu)
{
        iommu->flags |= IOMMU_NEED_FLUSH;
}

static inline void clear_flush(struct iommu_map_table *iommu)
{
        iommu->flags &= ~IOMMU_NEED_FLUSH;
}

static void setup_iommu_pool_hash(void)
{
        unsigned int i;
        static bool do_once;

        if (do_once)
                return;
        do_once = true;
        for_each_possible_cpu(i)
                per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}

/*
 * Initialize iommu_pool entries for the iommu_map_table. `num_entries'
 * is the number of table entries. If `large_pool' is set to true,
 * the top 1/4 of the table will be set aside for pool allocations
 * of more than iommu_large_alloc pages.
 */
void iommu_tbl_pool_init(struct iommu_map_table *iommu,
                         unsigned long num_entries,
                         u32 table_shift,
                         void (*lazy_flush)(struct iommu_map_table *),
                         bool large_pool, u32 npools,
                         bool skip_span_boundary_check)
{
        unsigned int start, i;
        struct iommu_pool *p = &(iommu->large_pool);

        setup_iommu_pool_hash();
        if (npools == 0)
                iommu->nr_pools = IOMMU_NR_POOLS;
        else
                iommu->nr_pools = npools;
        BUG_ON(npools > IOMMU_NR_POOLS);

        iommu->table_shift = table_shift;
        iommu->lazy_flush = lazy_flush;
        start = 0;
        if (skip_span_boundary_check)
                iommu->flags |= IOMMU_NO_SPAN_BOUND;
        if (large_pool)
                iommu->flags |= IOMMU_HAS_LARGE_POOL;

        if (!large_pool)
                iommu->poolsize = num_entries/iommu->nr_pools;
        else
                iommu->poolsize = (num_entries * 3 / 4)/iommu->nr_pools;
        for (i = 0; i < iommu->nr_pools; i++) {
                spin_lock_init(&(iommu->pools[i].lock));
                iommu->pools[i].start = start;
                iommu->pools[i].hint = start;
                start += iommu->poolsize; /* start for next pool */
                iommu->pools[i].end = start - 1;
        }
        if (!large_pool)
                return;
        /* initialize large_pool */
        spin_lock_init(&(p->lock));
        p->start = start;
        p->hint = p->start;
        p->end = num_entries;
}
EXPORT_SYMBOL(iommu_tbl_pool_init);
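
/*
 * Example (hypothetical values, not from any driver): set up a table of
 * 8192 entries with an 8K entry size (table_shift == 13), a large pool,
 * and the default number of pools.  Assuming IOMMU_NR_POOLS is 16, each
 * small pool then covers (8192 * 3 / 4) / 16 == 384 entries and the
 * large pool covers entries 6144..8191.  "tbl" and "my_lazy_flush" are
 * placeholders:
 *
 *      iommu_tbl_pool_init(&tbl, 8192, 13, my_lazy_flush, true, 0, false);
 */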

unsigned long iommu_tbl_range_alloc(struct device *dev,
                                struct iommu_map_table *iommu,
                                unsigned long npages,
                                unsigned long *handle,
                                unsigned long mask,
                                unsigned int align_order)
{
        unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
        unsigned long n, end, start, limit, boundary_size;
        struct iommu_pool *pool;
        int pass = 0;
        unsigned int pool_nr;
        unsigned int npools = iommu->nr_pools;
        unsigned long flags;
        bool large_pool = ((iommu->flags & IOMMU_HAS_LARGE_POOL) != 0);
        bool largealloc = (large_pool && npages > iommu_large_alloc);
        unsigned long shift;
        unsigned long align_mask = 0;

        if (align_order > 0)
                align_mask = ~0ul >> (BITS_PER_LONG - align_order);

        /* Sanity check */
        if (unlikely(npages == 0)) {
                WARN_ON_ONCE(1);
                return DMA_ERROR_CODE;
        }

        if (largealloc) {
                pool = &(iommu->large_pool);
                pool_nr = 0; /* to keep compiler happy */
        } else {
                /* pick out pool_nr */
                pool_nr = pool_hash & (npools - 1);
                pool = &(iommu->pools[pool_nr]);
        }
        spin_lock_irqsave(&pool->lock, flags);

 again:
        if (pass == 0 && handle && *handle &&
            (*handle >= pool->start) && (*handle < pool->end))
                start = *handle;
        else
                start = pool->hint;

        limit = pool->end;

        /* The case below can happen if we have a small segment appended
         * to a large one, or when the previous alloc was at the very end
         * of the available space. If so, go back to the beginning. If a
         * flush is needed, it will get done based on the return value
         * from iommu_area_alloc() below.
         */
        if (start >= limit)
                start = pool->start;
        shift = iommu->table_map_base >> iommu->table_shift;
        if (limit + shift > mask) {
                limit = mask - shift + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0 in pool 0.
                 */
                if ((start & mask) >= limit || pass > 0) {
                        spin_unlock(&(pool->lock));
                        pool = &(iommu->pools[0]);
                        spin_lock(&(pool->lock));
                        start = pool->start;
                } else {
                        start &= mask;
                }
        }

        if (dev)
                boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                      1 << iommu->table_shift);
        else
                boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);

        boundary_size = boundary_size >> iommu->table_shift;
        /*
         * If skip_span_boundary_check was set during init, we set things
         * up so that iommu_is_span_boundary() merely checks whether
         * (index + npages) < num_tsb_entries.
         */
        if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
                shift = 0;
                boundary_size = iommu->poolsize * iommu->nr_pools;
        }
        n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
                             boundary_size, align_mask);
        if (n == -1) {
                if (likely(pass == 0)) {
                        /* First failure, rescan from the beginning.  */
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else if (!largealloc && pass <= iommu->nr_pools) {
                        spin_unlock(&(pool->lock));
                        pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
                        pool = &(iommu->pools[pool_nr]);
                        spin_lock(&(pool->lock));
                        pool->hint = pool->start;
                        set_flush(iommu);
                        pass++;
                        goto again;
                } else {
                        /* give up */
                        n = DMA_ERROR_CODE;
                        goto bail;
                }
        }
        if (iommu->lazy_flush &&
            (n < pool->hint || need_flush(iommu))) {
                clear_flush(iommu);
                iommu->lazy_flush(iommu);
        }

        end = n + npages;
        pool->hint = end;

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;
bail:
        spin_unlock_irqrestore(&(pool->lock), flags);

        return n;
}
EXPORT_SYMBOL(iommu_tbl_range_alloc);
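
/*
 * Example (sketch, hypothetical names): allocate a range and turn the
 * returned table index into a DMA address with the same base/shift
 * mapping that iommu_tbl_range_free() inverts below.  "tbl", "dev",
 * "npages" and "dma_addr" are placeholders:
 *
 *      entry = iommu_tbl_range_alloc(dev, &tbl, npages, NULL, ~0ul, 0);
 *      if (entry == DMA_ERROR_CODE)
 *              return -ENOMEM; /* table exhausted */
 *      dma_addr = tbl.table_map_base + (entry << tbl.table_shift);
 */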

static struct iommu_pool *get_pool(struct iommu_map_table *tbl,
                                   unsigned long entry)
{
        struct iommu_pool *p;
        unsigned long largepool_start = tbl->large_pool.start;
        bool large_pool = ((tbl->flags & IOMMU_HAS_LARGE_POOL) != 0);

        /* The large pool is the last pool at the top of the table */
        if (large_pool && entry >= largepool_start) {
                p = &tbl->large_pool;
        } else {
                unsigned int pool_nr = entry / tbl->poolsize;

                BUG_ON(pool_nr >= tbl->nr_pools);
                p = &tbl->pools[pool_nr];
        }
        return p;
}
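
/*
 * For instance, with the hypothetical layout sketched above (poolsize
 * == 384, large pool starting at 6144), entry 500 maps to pool
 * 500 / 384 == 1, while entry 7000 lands in the large pool.
 */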

/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
                          unsigned long npages, unsigned long entry)
{
        struct iommu_pool *pool;
        unsigned long flags;
        unsigned long shift = iommu->table_shift;

        if (entry == DMA_ERROR_CODE) /* use default addr->entry mapping */
                entry = (dma_addr - iommu->table_map_base) >> shift;
        pool = get_pool(iommu, entry);

        spin_lock_irqsave(&(pool->lock), flags);
        bitmap_clear(iommu->map, entry, npages);
        spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);
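
/*
 * Example (sketch, hypothetical names): release the range allocated in
 * the sketch above.  Passing DMA_ERROR_CODE as the last argument selects
 * the default addr->entry mapping:
 *
 *      iommu_tbl_range_free(&tbl, dma_addr, npages, DMA_ERROR_CODE);
 */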