linux/include/linux/scatterlist.h
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H

#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>

#include <asm/types.h>
#include <asm/scatterlist.h>
#include <asm/io.h>

struct sg_table {
        struct scatterlist *sgl;        /* the list */
        unsigned int nents;             /* number of mapped entries */
        unsigned int orig_nents;        /* original size of list */
};

/*
 * Notes on SG table design.
 *
 * Architectures must provide an unsigned long page_link field in the
 * scatterlist struct. We use that to place the page pointer AND encode
 * information about the sg table as well. The two lower bits are reserved
 * for this information.
 *
 * If bit 0 is set, then the page_link contains a pointer to the next sg
 * table list. Otherwise the next entry is at sg + 1.
 *
 * If bit 1 is set, then this sg entry is the last element in a list.
 *
 * See sg_next().
 *
 */

#define SG_MAGIC        0x87654321

/*
 * We overload the LSB of the page pointer to indicate whether it's
 * a valid sg entry, or whether it points to the start of a new scatterlist.
 * Those low bits are there for everyone! (thanks mason :-)
 */
#define sg_is_chain(sg)         ((sg)->page_link & 0x01)
#define sg_is_last(sg)          ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg)        \
        ((struct scatterlist *) ((sg)->page_link & ~0x03))
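
/*
 * Illustrative sketch of how these bits are consumed by a list walker. The
 * real sg_next() lives in lib/scatterlist.c (it is only declared further
 * down in this header); the logic is roughly:
 *
 *      struct scatterlist *example_sg_next(struct scatterlist *sg)
 *      {
 *              if (sg_is_last(sg))
 *                      return NULL;            // bit 1 set: end of the table
 *
 *              sg++;                           // normal case: adjacent entry
 *              if (sg_is_chain(sg))
 *                      sg = sg_chain_ptr(sg);  // bit 0 set: follow the link
 *
 *              return sg;
 *      }
 */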

/**
 * sg_assign_page - Assign a given page to an SG entry
 * @sg:             SG entry
 * @page:           The page
 *
 * Description:
 *   Assign page to sg entry. Also see sg_set_page(), the most commonly used
 *   variant.
 *
 **/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
        unsigned long page_link = sg->page_link & 0x3;

        /*
         * In order for the low bit stealing approach to work, pages
         * must be aligned at a 32-bit boundary as a minimum.
         */
        BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        sg->page_link = page_link | (unsigned long) page;
}

/**
 * sg_set_page - Set sg entry to point at given page
 * @sg:          SG entry
 * @page:        The page
 * @len:         Length of data
 * @offset:      Offset into page
 *
 * Description:
 *   Use this function to set an sg entry pointing at a page, never assign
 *   the page directly. We encode sg table information in the lower bits
 *   of the page pointer. See sg_page() for looking up the page belonging
 *   to an sg entry.
 *
 **/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
                               unsigned int len, unsigned int offset)
{
        sg_assign_page(sg, page);
        sg->offset = offset;
        sg->length = len;
}

static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
#endif
        return (struct page *)((sg)->page_link & ~0x3);
}

/**
 * sg_set_buf - Set sg entry to point at given data
 * @sg:          SG entry
 * @buf:         Data
 * @buflen:      Data length
 *
 **/
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
                              unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(!virt_addr_valid(buf));
#endif
        sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
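
/*
 * Illustrative example (a sketch, not part of the API; the names are made
 * up): describing two lowmem buffers with a small on-stack list.
 * sg_init_table() is declared further down in this header and also marks
 * the last entry of the list.
 *
 *      void example_fill(void *hdr, unsigned int hdr_len,
 *                        void *payload, unsigned int payload_len)
 *      {
 *              struct scatterlist sgl[2];
 *
 *              sg_init_table(sgl, 2);
 *              sg_set_buf(&sgl[0], hdr, hdr_len);
 *              sg_set_buf(&sgl[1], payload, payload_len);
 *      }
 */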

/*
 * Loop over each sg element, following the pointer to a new list if necessary
 */
#define for_each_sg(sglist, sg, nr, __i)        \
        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
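
/*
 * Illustrative example (sketch only, names made up): summing the byte count
 * of a list with for_each_sg(). The loop follows chain entries transparently
 * via sg_next().
 *
 *      unsigned int example_total_len(struct scatterlist *sgl, int nents)
 *      {
 *              struct scatterlist *sg;
 *              unsigned int total = 0;
 *              int i;
 *
 *              for_each_sg(sgl, sg, nents, i)
 *                      total += sg->length;
 *
 *              return total;
 *      }
 */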

/**
 * sg_chain - Chain two sglists together
 * @prv:        First scatterlist
 * @prv_nents:  Number of entries in prv
 * @sgl:        Second scatterlist
 *
 * Description:
 *   Links @prv@ and @sgl@ together, to form a longer scatterlist.
 *
 **/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
                            struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
        BUG();
#endif

        /*
         * offset and length are unused for chain entry.  Clear them.
         */
        prv[prv_nents - 1].offset = 0;
        prv[prv_nents - 1].length = 0;

        /*
         * Set lowest bit to indicate a link pointer, and make sure to clear
         * the termination bit if it happens to be set.
         */
        prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
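
/*
 * Illustrative example (sketch only, names made up): joining two separately
 * allocated lists into one logical list. The last entry of the first list is
 * turned into the chain link, so it must not carry data of its own. Only
 * valid on architectures that define ARCH_HAS_SG_CHAIN.
 *
 *      void example_chain(struct scatterlist *first, unsigned int first_ents,
 *                         struct scatterlist *second)
 *      {
 *              sg_chain(first, first_ents, second);
 *              // walkers using sg_next()/for_each_sg() now continue
 *              // seamlessly from first[] into second[]
 *      }
 */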

/**
 * sg_mark_end - Mark the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Marks the passed in sg entry as the termination point for the sg
 *   table. A call to sg_next() on this entry will return NULL.
 *
 **/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        /*
         * Set termination bit, clear potential chain bit
         */
        sg->page_link |= 0x02;
        sg->page_link &= ~0x01;
}

/**
 * sg_unmark_end - Undo setting the end of the scatterlist
 * @sg:          SG entry
 *
 * Description:
 *   Removes the termination marker from the given entry of the scatterlist.
 *
 **/
static inline void sg_unmark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
        BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
        sg->page_link &= ~0x02;
}

/**
 * sg_phys - Return physical address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_to_phys() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that it is legal to call page_to_phys()
 *   on the sg page.
 *
 **/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
        return page_to_phys(sg_page(sg)) + sg->offset;
}

/**
 * sg_virt - Return virtual address of an sg entry
 * @sg:      SG entry
 *
 * Description:
 *   This calls page_address() on the page in this sg entry, and adds the
 *   sg offset. The caller must know that the sg page has a valid virtual
 *   mapping.
 *
 **/
static inline void *sg_virt(struct scatterlist *sg)
{
        return page_address(sg_page(sg)) + sg->offset;
}
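
/*
 * Illustrative example (sketch only, names made up): copying the contents of
 * one lowmem entry via its kernel virtual address. This is only safe when
 * the backing page is known to have a linear mapping, e.g. when the entry
 * was set up with sg_set_buf().
 *
 *      void example_copy_out(struct scatterlist *sg, void *dst)
 *      {
 *              memcpy(dst, sg_virt(sg), sg->length);
 *      }
 */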

int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);

typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);

void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
                     sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
        struct page **pages, unsigned int n_pages,
        unsigned long offset, unsigned long size,
        gfp_t gfp_mask);

size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen);
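
/*
 * Illustrative example (a sketch, names made up): allocating a table for a
 * set of pages, pointing each entry at one page and releasing the table
 * again. sg_alloc_table_from_pages() can do the page assignment in a single
 * call; a real user would typically dma_map_sg() the list before use.
 *
 *      int example_table(struct page **pages, unsigned int npages)
 *      {
 *              struct sg_table table;
 *              struct scatterlist *sg;
 *              unsigned int i;
 *              int ret;
 *
 *              ret = sg_alloc_table(&table, npages, GFP_KERNEL);
 *              if (ret)
 *                      return ret;
 *
 *              for_each_sg(table.sgl, sg, table.nents, i)
 *                      sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 *              sg_free_table(&table);
 *              return 0;
 *      }
 */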

/*
 * Maximum number of entries that will be allocated in one piece. If
 * a list larger than this is required, then chaining will be utilized.
 */
#define SG_MAX_SINGLE_ALLOC             (PAGE_SIZE / sizeof(struct scatterlist))

/*
 * sg page iterator
 *
 * Iterates over sg entries page-by-page.  On each successful iteration,
 * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
 * to get the current page and its dma address. @piter->sg will point to the
 * sg holding this page and @piter->sg_pgoffset to the page's page offset
 * within the sg. The iteration will stop either when the maximum number of
 * sg entries has been reached or when a terminating sg
 * (sg_is_last(sg) == true) is reached.
 */
struct sg_page_iter {
        struct scatterlist      *sg;            /* sg holding the page */
        unsigned int            sg_pgoffset;    /* page offset within the sg */

        /* these are internal states, keep away */
        unsigned int            __nents;        /* remaining sg entries */
        int                     __pg_advance;   /* nr pages to advance at the
                                                 * next step */
};

bool __sg_page_iter_next(struct sg_page_iter *piter);
void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset);
/**
 * sg_page_iter_page - get the current page held by the page iterator
 * @piter:      page iterator holding the page
 */
static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
{
        return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
}

/**
 * sg_page_iter_dma_address - get the dma address of the current page held by
 * the page iterator.
 * @piter:      page iterator holding the page
 */
static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
{
        return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
}

/**
 * for_each_sg_page - iterate over the pages of the given sg list
 * @sglist:     sglist to iterate over
 * @piter:      page iterator to hold current page, sg, sg_pgoffset
 * @nents:      maximum number of sg entries to iterate over
 * @pgoffset:   starting page offset
 */
#define for_each_sg_page(sglist, piter, nents, pgoffset)                   \
        for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
             __sg_page_iter_next(piter);)
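
/*
 * Illustrative example (sketch only, names made up): visiting every backing
 * page of a list and handing out its page and DMA address. The DMA address
 * is only meaningful after the list has been mapped with dma_map_sg().
 *
 *      void example_walk_pages(struct scatterlist *sgl, unsigned int nents,
 *                              void (*cb)(struct page *page, dma_addr_t dma))
 *      {
 *              struct sg_page_iter piter;
 *
 *              for_each_sg_page(sgl, &piter, nents, 0)
 *                      cb(sg_page_iter_page(&piter),
 *                         sg_page_iter_dma_address(&piter));
 *      }
 */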

/*
 * Mapping sg iterator
 *
 * Iterates over sg entries, mapping them page-by-page.  On each successful
 * iteration, @miter->page points to the mapped page and
 * @miter->length bytes of data can be accessed at @miter->addr.  As
 * long as an iteration is enclosed between start and stop, the user
 * is free to choose the control structure and when to stop.
 *
 * @miter->consumed is set to @miter->length on each iteration.  It
 * can be adjusted if the user can't consume all the bytes in one go.
 * Also, a stopped iteration can be resumed by calling next on it.
 * This is useful when iteration needs to release all resources and
 * continue later (e.g. at the next interrupt).
 */

#define SG_MITER_ATOMIC         (1 << 0)        /* use kmap_atomic */
#define SG_MITER_TO_SG          (1 << 1)        /* flush back to phys on unmap */
#define SG_MITER_FROM_SG        (1 << 2)        /* nop */

struct sg_mapping_iter {
        /* the following four fields can be accessed directly */
        struct page             *page;          /* currently mapped page */
        void                    *addr;          /* pointer to the mapped area */
        size_t                  length;         /* length of the mapped area */
        size_t                  consumed;       /* number of consumed bytes */
        struct sg_page_iter     piter;          /* page iterator */

        /* these are internal states, keep away */
        unsigned int            __offset;       /* offset within page */
        unsigned int            __remaining;    /* remaining bytes on page */
        unsigned int            __flags;
};

void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
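
/*
 * Illustrative example (sketch only, names made up): clearing every byte
 * covered by a list with the mapping iterator. SG_MITER_TO_SG is passed
 * because the data is modified through the temporary mapping.
 *
 *      void example_zero(struct scatterlist *sgl, unsigned int nents)
 *      {
 *              struct sg_mapping_iter miter;
 *
 *              sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
 *              while (sg_miter_next(&miter))
 *                      memset(miter.addr, 0, miter.length);
 *              sg_miter_stop(&miter);
 *      }
 */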

#endif /* _LINUX_SCATTERLIST_H */