linux/include/linux/swiotlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct device;
struct page;
struct scatterlist;

enum swiotlb_force {
        SWIOTLB_NORMAL,         /* Default - depending on HW DMA mask etc. */
        SWIOTLB_FORCE,          /* swiotlb=force */
        SWIOTLB_NO_FORCE,       /* swiotlb=noforce */
};
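
/*
 * Illustrative sketch (hypothetical helper, not part of this header's API):
 * how a mapping path can honour the override modes above; swiotlb=force
 * bounces every streaming mapping, swiotlb=noforce refuses to bounce at all.
 */
static inline bool swiotlb_example_must_bounce(enum swiotlb_force mode)
{
        return mode == SWIOTLB_FORCE;
}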

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE  128

/*
 * Log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL << 20)
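
/*
 * Worked example (illustrative; the helper name is hypothetical): each slab
 * is 1 << IO_TLB_SHIFT = 2KB, so the 64MB default pool corresponds to
 * (64UL << 20) >> 11 = 32768 slabs, and one mapping may cover at most
 * IO_TLB_SEGSIZE slabs, i.e. 128 * 2KB = 256KB of contiguous bounce space.
 */
static inline unsigned long swiotlb_example_nslabs(unsigned long bytes)
{
        return bytes >> IO_TLB_SHIFT;   /* pool size in bytes -> slab count */
}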

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern int swiotlb_late_init_with_default_size(size_t default_size);
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
                size_t mapping_size, size_t alloc_size,
                enum dma_data_direction dir, unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
                                     size_t mapping_size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
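
/*
 * Illustrative sketch (hypothetical helper, not part of the swiotlb API):
 * the basic bounce-buffer lifecycle built from the declarations above.
 * Error handling is omitted; swiotlb_tbl_map_single() returns
 * (phys_addr_t)DMA_MAPPING_ERROR when no slots are available.
 */
static inline void swiotlb_example_roundtrip(struct device *dev,
                                             phys_addr_t phys, size_t size)
{
        phys_addr_t tlb_addr;

        /* Allocate bounce slots and copy the original buffer into them. */
        tlb_addr = swiotlb_tbl_map_single(dev, phys, size, size,
                                          DMA_BIDIRECTIONAL, 0);

        /* ... the device performs DMA against the bounce buffer here ... */

        /* Copy the device's writes back so the CPU sees current data. */
        swiotlb_sync_single_for_cpu(dev, tlb_addr, size, DMA_BIDIRECTIONAL);

        /* Release the slots; unmap also does a final copy-back for reads. */
        swiotlb_tbl_unmap_single(dev, tlb_addr, size, DMA_BIDIRECTIONAL, 0);
}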

#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;

/**
 * struct io_tlb_mem - IO TLB Memory Pool Descriptor
 *
 * @start:      The start address of the swiotlb memory pool. Used to do a quick
 *              range check to see if the memory was in fact allocated by this
 *              API.
 * @end:        The end address of the swiotlb memory pool. Used to do a quick
 *              range check to see if the memory was in fact allocated by this
 *              API.
 * @nslabs:     The number of IO TLB blocks (in groups of 64) between @start and
 *              @end. This is command line adjustable via setup_io_tlb_npages.
 * @used:       The number of used IO TLB blocks.
 * @list:       The free list describing the number of free entries available
 *              from each index.
 * @index:      The index to start searching in the next round.
 * @orig_addr:  The original address corresponding to a mapped entry.
 * @alloc_size: Size of the allocated buffer.
 * @lock:       The lock to protect the above data structures in the map and
 *              unmap calls.
 * @debugfs:    The debugfs dentry.
 * @late_alloc: %true if allocated using the page allocator
 */
struct io_tlb_mem {
        phys_addr_t start;
        phys_addr_t end;
        unsigned long nslabs;
        unsigned long used;
        unsigned int index;
        spinlock_t lock;
        struct dentry *debugfs;
        bool late_alloc;
        struct io_tlb_slot {
                phys_addr_t orig_addr;
                size_t alloc_size;
                unsigned int list;
        } slots[];
};
extern struct io_tlb_mem *io_tlb_default_mem;

static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
        struct io_tlb_mem *mem = io_tlb_default_mem;

        return mem && paddr >= mem->start && paddr < mem->end;
}
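
/*
 * Illustrative sketch (hypothetical helper): the slot arithmetic used by
 * kernel/dma/swiotlb.c: a bounce address maps to its slot by its offset
 * from @start in IO_TLB_SIZE units, which is how @orig_addr and @alloc_size
 * for a mapping are looked up.
 */
static inline unsigned int swiotlb_example_slot_index(struct io_tlb_mem *mem,
                                                      phys_addr_t tlb_addr)
{
        return (tlb_addr - mem->start) >> IO_TLB_SHIFT;
}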

void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long size);
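
/*
 * Illustrative sketch (hypothetical helper): using the queries above to
 * bound request sizes, roughly what dma_max_mapping_size() ends up doing
 * for devices that have to bounce through swiotlb.
 */
static inline bool swiotlb_example_fits(struct device *dev, size_t len)
{
        return !is_swiotlb_active() || len <= swiotlb_max_mapping_size(dev);
}
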
#else
#define swiotlb_force SWIOTLB_NO_FORCE
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
        return false;
}
static inline void swiotlb_exit(void)
{
}
static inline unsigned int swiotlb_max_segment(void)
{
        return 0;
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
        return SIZE_MAX;
}

static inline bool is_swiotlb_active(void)
{
        return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);
extern void swiotlb_set_max_segment(unsigned int);

#endif /* __LINUX_SWIOTLB_H */