linux/include/linux/scatterlist.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <asm/io.h>

struct scatterlist {
#ifdef CONFIG_DEBUG_SG
        unsigned long   sg_magic;
#endif
        unsigned long   page_link;
        unsigned int    offset;
        unsigned int    length;
        dma_addr_t      dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        unsigned int    dma_length;
#endif
};

/*
 * These macros should be used after a dma_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries dma_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)      ((sg)->dma_address)

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)          ((sg)->dma_length)
#else
#define sg_dma_len(sg)          ((sg)->length)
#endif
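
/*
 * Illustrative sketch (not part of this header): after a list has been
 * mapped with dma_map_sg() from <linux/dma-mapping.h>, walk only the
 * entries it returned and read each bus address and length through the
 * accessors above. The device pointer "dev", the entry count "NENTS" and
 * the filled array "sgl" are assumptions for the example.
 *
 *      struct scatterlist sgl[NENTS];
 *      struct scatterlist *sg;
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sgl, NENTS, DMA_TO_DEVICE);
 *      for_each_sg(sgl, sg, count, i) {
 *              dma_addr_t addr = sg_dma_address(sg);
 *              unsigned int len = sg_dma_len(sg);
 *
 *              (program one hardware descriptor with addr/len here)
 *      }
 *      dma_unmap_sg(dev, sgl, NENTS, DMA_TO_DEVICE);
 */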

struct sg_table {
        struct scatterlist *sgl;        /* the list */
        unsigned int nents;             /* number of mapped entries */
        unsigned int orig_nents;        /* original size of list */
};

/*
 * Notes on SG table design.
 *
 * We use the unsigned long page_link field in the scatterlist struct to place
 * the page pointer AND encode information about the sg table as well. The two
 * lower bits are reserved for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */

#define SG_MAGIC        0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)         ((sg)->page_link & 0x01)
#define sg_is_last(sg)          ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)        \
        ((struct scatterlist *) ((sg)->page_link & ~0x03))
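
/*
 * Illustrative sketch (not part of this header): the three helpers above are
 * what list traversal relies on. Advancing by one entry conceptually looks
 * like the logic below; see the real sg_next() in lib/scatterlist.c for the
 * authoritative version.
 *
 *      if (sg_is_last(sg))
 *              return NULL;            stop at the termination marker
 *      sg++;                           the next entry is normally adjacent
 *      if (unlikely(sg_is_chain(sg)))
 *              sg = sg_chain_ptr(sg);  bit 0 set: follow the chain pointer
 *      return sg;
 */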

/**
 * sg_assign_page - Assign a given page to an SG entry
 * @sg:             SG entry
 * @page:           The page
 *
 * Description:
 *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
 *   variant.
 *
 **/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
        unsigned long page_link = sg->page_link & 0x3;

        /*
         * In order for the low bit stealing approach to work, pages
         * must be aligned at a 32-bit boundary as a minimum.
         */
        BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        sg->page_link = page_link | (unsigned long) page;
}

/**
 * sg_set_page - Set sg entry to point at given page
 * @sg:          SG entry
 * @page:        The page
 * @len:         Length of data
 * @offset:      Offset into page
 *
 * Description:
 *   Use this function to set an sg entry pointing at a page, never assign
 *   the page directly. We encode sg table information in the lower bits
 *   of the page pointer. See sg_page() for looking up the page belonging
 *   to an sg entry.
 *
 **/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg_assign_page(sg, page);
        sg->offset = offset;
        sg->length = len;
}

static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        return (struct page *)((sg)->page_link & ~0x3);
}

/**
 * sg_set_buf - Set sg entry to point at given data
 * @sg:          SG entry
 * @buf:         Data
 * @buflen:      Data length
 *
 **/
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(!virt_addr_valid(buf));
#endif
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}

/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)        \
        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
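
/*
 * Illustrative sketch (not part of this header): build a short table on the
 * stack, point each entry at a kernel buffer, and walk it with for_each_sg().
 * The buffers "a" and "b" and their lengths are assumptions for the example.
 *
 *      struct scatterlist table[2];
 *      struct scatterlist *sg;
 *      int i;
 *
 *      sg_init_table(table, 2);
 *      sg_set_buf(&table[0], a, a_len);
 *      sg_set_buf(&table[1], b, b_len);
 *      for_each_sg(table, sg, 2, i)
 *              pr_debug("entry %d: %u bytes\n", i, sg->length);
 */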

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
                            struct scatterlist *sgl)
{
        /*
         * offset and length are unused for chain entry.  Clear them.
         */
        prv[prv_nents - 1].offset = 0;
        prv[prv_nents - 1].length = 0;

        /*
         * Set lowest bit to indicate a link pointer, and make sure to clear
         * the termination bit if it happens to be set.
         */
        prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
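
/*
 * Illustrative sketch (not part of this header): the last entry of the first
 * array is consumed as the link, so reserve one extra slot for it. The array
 * sizes and the way the data entries get filled are assumptions here.
 *
 *      struct scatterlist first[4];    three data entries + one link slot
 *      struct scatterlist second[8];
 *
 *      sg_init_table(first, 4);
 *      sg_init_table(second, 8);
 *      (fill first[0..2] and second[0..7] with sg_set_page()/sg_set_buf())
 *      sg_chain(first, 4, second);     first[3] now points at second
 */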

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        /*
         * Set termination bit, clear potential chain bit
         */
        sg->page_link |= 0x02;
        sg->page_link &= ~0x01;
}

/**
 * sg_unmark_end - Undo setting the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Removes the termination marker from the given entry of the scatterlist.
 *
 **/
static inline void sg_unmark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        sg->page_link &= ~0x02;
}

/**
 * sg_phys - Return physical address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_to_phys() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that it is legal to call page_to_phys()
 *   on the sg page.
 *
 **/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
        return page_to_phys(sg_page(sg)) + sg->offset;
}

/**
 * sg_virt - Return virtual address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_address() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that the sg page has a valid virtual
 *   mapping.
 *
 **/
static inline void *sg_virt(struct scatterlist *sg)
{
        return page_address(sg_page(sg)) + sg->offset;
}

int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
int sg_split(struct scatterlist *in, const int in_mapped_nents,
             const off_t skip, const int nb_splits,
             const size_t *split_sizes,
             struct scatterlist **out, int *out_mapped_nents,
             gfp_t gfp_mask);

typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
                     struct scatterlist *, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
        struct page **pages, unsigned int n_pages,
        unsigned long offset, unsigned long size,
        gfp_t gfp_mask);
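
/*
 * Illustrative sketch (not part of this header): allocate a table for a
 * known number of entries, fill it, and release it again. The page array
 * "pages" and the count "npages" are assumptions for the example, and error
 * handling is reduced to the bare minimum.
 *
 *      struct sg_table sgt;
 *      struct scatterlist *sg;
 *      int i, ret;
 *
 *      ret = sg_alloc_table(&sgt, npages, GFP_KERNEL);
 *      if (ret)
 *              return ret;
 *      for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 *              sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *      (use the table, e.g. map it for DMA)
 *      sg_free_table(&sgt);
 */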

size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen);

size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip);
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                      size_t buflen, off_t skip);
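
/*
 * Illustrative sketch (not part of this header): the copy helpers move data
 * between a scatterlist and a plain kernel buffer and return the number of
 * bytes actually copied. The table "sgt" and the buffer "buf"/"len" are
 * assumptions for the example.
 *
 *      size_t copied;
 *
 *      copied = sg_copy_from_buffer(sgt.sgl, sgt.orig_nents, buf, len);
 *      if (copied < len)
 *              pr_warn("scatterlist shorter than source buffer\n");
 */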

/*
 * Maximum number of entries that will be allocated in one piece, if
 * a list larger than this is required then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/*
 * The maximum number of SG segments that we will put inside a
 * scatterlist (unless chaining is used). Should ideally fit inside a
 * single page, to avoid a higher order allocation.  We could define this
 * to SG_MAX_SINGLE_ALLOC to pack correctly at the highest order.  The
 * minimum value is 32.
 */
#define SG_CHUNK_SIZE   128

/*
 * Like SG_CHUNK_SIZE, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8 MB I/Os.
 */
#ifdef CONFIG_ARCH_HAS_SG_CHAIN
#define SG_MAX_SEGMENTS 2048
#else
#define SG_MAX_SEGMENTS SG_CHUNK_SIZE
#endif

#ifdef CONFIG_SG_POOL
void sg_free_table_chained(struct sg_table *table, bool first_chunk);
int sg_alloc_table_chained(struct sg_table *table, int nents,
                           struct scatterlist *first_chunk);
#endif

/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page.  On each successful iteration,
 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
 * to get the current page and its dma address. @piter->sg will point to the
 * sg holding this page and @piter->sg_pgoffset to the page's page offset
 * within the sg. The iteration will stop either when a maximum number of sg
 * entries was reached or a terminating sg (sg_is_last(sg) == true) was reached.
 */
struct sg_page_iter {
        struct scatterlist      *sg;            /* sg holding the page */
        unsigned int            sg_pgoffset;    /* page offset within the sg */

        /* these are internal states, keep away */
        unsigned int            __nents;        /* remaining sg entries */
        int                     __pg_advance;   /* nr pages to advance at the
                                                 * next step */
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset);
/**
 * sg_page_iter_page - get the current page held by the page iterator
 * @piter:      page iterator holding the page
 */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
        return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

/**
 * sg_page_iter_dma_address - get the dma address of the current page held by
 * the page iterator.
 * @piter:      page iterator holding the page
 */
static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
{
        return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
}

/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:     sglist to iterate over
 * @piter:      page iterator to hold current page, sg, sg_pgoffset
 * @nents:      maximum number of sg entries to iterate over
 * @pgoffset:   starting page offset
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)                   \
        for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
             __sg_page_iter_next(piter);)
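
/*
 * Illustrative sketch (not part of this header): visit every page covered by
 * a table, regardless of how many pages each entry spans. "sgt" is assumed
 * to be an initialized and filled struct sg_table.
 *
 *      struct sg_page_iter piter;
 *
 *      for_each_sg_page(sgt.sgl, &piter, sgt.orig_nents, 0) {
 *              struct page *page = sg_page_iter_page(&piter);
 *
 *              (operate on one page at a time)
 *      }
 */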

/*
 * Mapping sg iterator
 *
 * Iterates over sg entries mapping page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC         (1 << 0)        /* use kmap_atomic */
#define SG_MITER_TO_SG          (1 << 1)        /* flush back to phys on unmap */
#define SG_MITER_FROM_SG        (1 << 2)        /* nop */

struct sg_mapping_iter {
        /* the following three fields can be accessed directly */
        struct page             *page;          /* currently mapped page */
        void                    *addr;          /* pointer to the mapped area */
        size_t                  length;         /* length of the mapped area */
        size_t                  consumed;       /* number of consumed bytes */
        struct sg_page_iter     piter;          /* page iterator */

        /* these are internal states, keep away */
        unsigned int            __offset;       /* offset within page */
        unsigned int            __remaining;    /* remaining bytes on page */
        unsigned int            __flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags);
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
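
/*
 * Illustrative sketch (not part of this header): read a table's contents
 * through temporary kernel mappings. SG_MITER_ATOMIC is shown because the
 * data may be touched from a context that cannot sleep; "sgt" is assumed to
 * be an initialized and filled struct sg_table.
 *
 *      struct sg_mapping_iter miter;
 *
 *      sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
 *                     SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *      while (sg_miter_next(&miter)) {
 *              (read miter.length bytes starting at miter.addr)
 *      }
 *      sg_miter_stop(&miter);
 */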

#endif /* _LINUX_SCATTERLIST_H */