linux/lib/scatterlist.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:         The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
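
/*
 * Example (illustrative sketch, not part of the kernel API): walking a
 * possibly-chained list with sg_next() instead of plain pointer
 * arithmetic. The helper name 'sg_total_len' is hypothetical.
 *
 *        static unsigned int sg_total_len(struct scatterlist *sgl)
 *        {
 *                struct scatterlist *sg;
 *                unsigned int total = 0;
 *
 *                for (sg = sgl; sg; sg = sg_next(sg))
 *                        total += sg->length;
 *                return total;
 *        }
 */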

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:         The scatterlist
 *
 * Description:
 * Returns the total number of entries in @sg, taking
 * chaining into account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
        int nents;
        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:         The scatterlist
 * @len:        The total required length
 *
 * Description:
 * Determines the number of entries in sg that are required to meet
 * the supplied length, taking into account chaining as well
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
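
/*
 * Example (hedged sketch): mapping only the prefix of a list that covers an
 * I/O of 'io_len' bytes. 'dev', 'sgl' and 'io_len' are assumed to be
 * caller-provided; error handling is abbreviated.
 *
 *        int mapped, n = sg_nents_for_len(sgl, io_len);
 *
 *        if (n < 0)
 *                return n;        // list is shorter than io_len bytes
 *        mapped = dma_map_sg(dev, sgl, n, DMA_TO_DEVICE);
 */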

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:        First entry in the scatterlist
 * @nents:      Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

        BUG_ON(!sg_is_last(ret));
        return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:           The SG table
 * @nents:         Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
        sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:          SG entry
 * @buf:         Virtual address for IO
 * @buflen:      IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
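
/*
 * Example (hedged sketch): the common single-buffer pattern, e.g. for a
 * crypto or SPI transfer. 'buf' and 'len' are assumed caller-provided;
 * the buffer must not live on the stack.
 *
 *        struct scatterlist sg;
 *
 *        sg_init_one(&sg, buf, len);
 *        // ... hand &sg (nents == 1) to the DMA or crypto API ...
 */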

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are not
                 * commonly used (in a raw form) for kernel data structures.
                 * As we chain together a list of pages and then a normal
                 * kmalloc (tracked by kmemleak), in order for that last
                 * allocation not to become decoupled (and thus a
                 * false-positive) we need to inform kmemleak of all the
                 * intermediate allocations.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);
                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc_array(nents, sizeof(struct scatterlist),
                                     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:      The sg table header to use
 * @max_ents:   The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:    Free function
 * @num_ents:   Number of entries in the table
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     unsigned int nents_first_chunk, sg_free_fn *free_fn,
                     unsigned int num_ents)
{
        struct scatterlist *sgl, *next;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (num_ents) {
                unsigned int alloc_size = num_ents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > curr_max_ents) {
                        next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                num_ents -= sg_size;
                if (nents_first_chunk)
                        nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
                curr_max_ents = max_ents;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_append_table - Free a previously allocated append sg table.
 * @table:       The mapped sg append table header
 *
 **/
void sg_free_append_table(struct sg_append_table *table)
{
        __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
                        table->total_nents);
}
EXPORT_SYMBOL(sg_free_append_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:      The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
                        table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @max_ents:   The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *      scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:   GFP allocation mask
 * @alloc_fn:   Allocator to use
 *
 * Description:
 *   This function allocates a @table with @nents entries. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     unsigned int nents_first_chunk, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;
        unsigned prv_max_ents;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > curr_max_ents) {
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
                prv_max_ents = curr_max_ents;
                curr_max_ents = max_ents;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:      The sg table header to use
 * @nents:      Number of entries in sg list
 * @gfp_mask:   GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                sg_free_table(table);
        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
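
/*
 * Example (hedged sketch): allocate a table, point each entry at one page,
 * then free it. 'pages' and 'npages' are assumed caller-provided.
 *
 *        struct sg_table sgt;
 *        struct scatterlist *sg;
 *        int i, ret;
 *
 *        ret = sg_alloc_table(&sgt, npages, GFP_KERNEL);
 *        if (ret)
 *                return ret;
 *        for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
 *                sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *        // ... use sgt, then ...
 *        sg_free_table(&sgt);
 */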

static struct scatterlist *get_next_sg(struct sg_append_table *table,
                                       struct scatterlist *cur,
                                       unsigned long needed_sges,
                                       gfp_t gfp_mask)
{
        struct scatterlist *new_sg, *next_sg;
        unsigned int alloc_size;

        if (cur) {
                next_sg = sg_next(cur);
                /* Check if the last entry should be kept for chaining */
                if (!sg_is_last(next_sg) || needed_sges == 1)
                        return next_sg;
        }

        alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
        new_sg = sg_kmalloc(alloc_size, gfp_mask);
        if (!new_sg)
                return ERR_PTR(-ENOMEM);
        sg_init_table(new_sg, alloc_size);
        if (cur) {
                table->total_nents += alloc_size - 1;
                __sg_chain(next_sg, new_sg);
        } else {
                table->sgt.sgl = new_sg;
                table->total_nents = alloc_size;
        }
        return new_sg;
}

/**
 * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
 *                                    table from an array of pages
 * @sgt_append:  The sg append table to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @left_pages:  Number of pages the caller will append after this call
 * @gfp_mask:    GFP allocation mask
 *
 * Description:
 *    On the first call, allocate and initialize an sg table from a list of
 *    pages; on later calls, extend the scatterlist already held in
 *    @sgt_append. Contiguous ranges of the pages are squashed into a single
 *    scatterlist entry up to the maximum size specified in @max_segment.
 *    A user may provide an offset at the start and a size of valid data in
 *    a buffer specified by the page array. The returned sg table is released
 *    by sg_free_append_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   sg_free_append_table() to clean up any leftover allocations.
 *
 *   On the first call, @sgt_append must be initialized (zeroed).
 */
int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
                struct page **pages, unsigned int n_pages, unsigned int offset,
                unsigned long size, unsigned int max_segment,
                unsigned int left_pages, gfp_t gfp_mask)
{
        unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
        unsigned int added_nents = 0;
        struct scatterlist *s = sgt_append->prv;

        /*
         * The algorithm below requires max_segment to be aligned to PAGE_SIZE
         * otherwise it can overshoot.
         */
        max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
        if (WARN_ON(max_segment < PAGE_SIZE))
                return -EINVAL;

        if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
                return -EOPNOTSUPP;

        if (sgt_append->prv) {
                unsigned long paddr =
                        (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
                         sgt_append->prv->offset + sgt_append->prv->length) /
                        PAGE_SIZE;

                if (WARN_ON(offset))
                        return -EINVAL;

                /* Merge contiguous pages into the last SG */
                prv_len = sgt_append->prv->length;
                while (n_pages && page_to_pfn(pages[0]) == paddr) {
                        if (sgt_append->prv->length + PAGE_SIZE > max_segment)
                                break;
                        sgt_append->prv->length += PAGE_SIZE;
                        paddr++;
                        pages++;
                        n_pages--;
                }
                if (!n_pages)
                        goto out;
        }

        /* compute number of contiguous chunks */
        chunks = 1;
        seg_len = 0;
        for (i = 1; i < n_pages; i++) {
                seg_len += PAGE_SIZE;
                if (seg_len >= max_segment ||
                    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for (i = 0; i < chunks; i++) {
                unsigned int j, chunk_size;

                /* look for the end of the current chunk */
                seg_len = 0;
                for (j = cur_page + 1; j < n_pages; j++) {
                        seg_len += PAGE_SIZE;
                        if (seg_len >= max_segment ||
                            page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }

                /* Pass how many chunks might be left */
                s = get_next_sg(sgt_append, s, chunks - i + left_pages,
                                gfp_mask);
                if (IS_ERR(s)) {
                        /*
                         * Adjust entry length to be as before function was
                         * called.
                         */
                        if (sgt_append->prv)
                                sgt_append->prv->length = prv_len;
                        return PTR_ERR(s);
                }
                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page],
                            min_t(unsigned long, size, chunk_size), offset);
                added_nents++;
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }
        sgt_append->sgt.nents += added_nents;
        sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
        sgt_append->prv = s;
out:
        if (!left_pages)
                sg_mark_end(s);
        return 0;
}
EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
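
/*
 * Example (hedged sketch): building one table from two batches of pages.
 * 'batch1'/'batch2' and their counts 'n1'/'n2' are hypothetical; note that
 * @left_pages stays non-zero until the final batch.
 *
 *        struct sg_append_table app = {};
 *        int ret;
 *
 *        ret = sg_alloc_append_table_from_pages(&app, batch1, n1, 0,
 *                        n1 * PAGE_SIZE, UINT_MAX, n2, GFP_KERNEL);
 *        if (!ret)
 *                ret = sg_alloc_append_table_from_pages(&app, batch2, n2, 0,
 *                                n2 * PAGE_SIZE, UINT_MAX, 0, GFP_KERNEL);
 *        if (ret)
 *                sg_free_append_table(&app);
 */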

/**
 * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
 *                                     an array of pages and given maximum
 *                                     segment.
 * @sgt:         The sg table header to use
 * @pages:       Pointer to an array of page pointers
 * @n_pages:     Number of pages in the pages array
 * @offset:      Offset from start of the first page to the start of a buffer
 * @size:        Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @gfp_mask:    GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node up to the
 *    maximum size specified in @max_segment. A user may provide an offset at a
 *    start and a size of valid data in a buffer specified by the page array.
 *
 *    The returned sg table is released by sg_free_table.
 *
 *  Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
                                unsigned int n_pages, unsigned int offset,
                                unsigned long size, unsigned int max_segment,
                                gfp_t gfp_mask)
{
        struct sg_append_table append = {};
        int err;

        err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
                                               size, max_segment, 0, gfp_mask);
        if (err) {
                sg_free_append_table(&append);
                return err;
        }
        memcpy(sgt, &append.sgt, sizeof(*sgt));
        WARN_ON(append.total_nents != sgt->orig_nents);
        return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
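
/*
 * Example (hedged sketch): capping every element at a device's DMA segment
 * limit. 'dev', 'pages' and 'n' are assumed caller-provided.
 *
 *        struct sg_table sgt;
 *        int ret;
 *
 *        ret = sg_alloc_table_from_pages_segment(&sgt, pages, n, 0,
 *                        (unsigned long)n * PAGE_SIZE,
 *                        dma_get_max_seg_size(dev), GFP_KERNEL);
 *        if (ret)
 *                return ret;
 */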

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *      for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
                                    gfp_t gfp, unsigned int *nent_p)
{
        struct scatterlist *sgl, *sg;
        struct page *page;
        unsigned int nent, nalloc;
        u32 elem_len;

        nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
        /* Check for integer overflow */
        if (length > (nent << (PAGE_SHIFT + order)))
                return NULL;
        nalloc = nent;
        if (chainable) {
                /* Check for integer overflow */
                if (nalloc + 1 < nalloc)
                        return NULL;
                nalloc++;
        }
        sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
                            gfp & ~GFP_DMA);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, nalloc);
        sg = sgl;
        while (length) {
                elem_len = min_t(u64, length, PAGE_SIZE << order);
                page = alloc_pages(gfp, order);
                if (!page) {
                        sgl_free_order(sgl, order);
                        return NULL;
                }

                sg_set_page(sg, page, elem_len, 0);
                length -= elem_len;
                sg = sg_next(sg);
        }
        WARN_ONCE(length, "length = %llu\n", length);
        if (nent_p)
                *nent_p = nent;
        return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
                              unsigned int *nent_p)
{
        return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
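
/*
 * Example (hedged sketch): allocating backing pages for a 1 MiB transfer
 * and releasing them with the matching helper.
 *
 *        unsigned int nents;
 *        struct scatterlist *sgl;
 *
 *        sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
 *        if (!sgl)
 *                return -ENOMEM;
 *        // ... use sgl/nents, then ...
 *        sgl_free(sgl);
 */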

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid that a
 *   page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!sg)
                        break;
                page = sg_page(sg);
                if (page)
                        __free_pages(page, order);
        }
        kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
        sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
        sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
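
/*
 * Example (hedged sketch): the page iterator is normally driven through the
 * for_each_sg_page() macro rather than by calling __sg_page_iter_next()
 * directly. 'sgt' is assumed to be a populated struct sg_table.
 *
 *        struct sg_page_iter piter;
 *
 *        for_each_sg_page(sgt.sgl, &piter, sgt.orig_nents, 0) {
 *                struct page *page = sg_page_iter_page(&piter);
 *
 *                // ... touch one page at a time ...
 *        }
 */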

static int sg_dma_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
        struct sg_page_iter *piter = &dma_iter->base;

        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* operation flags, e.g. SG_MITER_TO_SG or SG_MITER_FROM_SG
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;

                miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
                miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
                miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
                                     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
                                     miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter first.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get to the next page if necessary.
         * __remaining, __offset is adjusted by sg_miter_stop
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);
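
/*
 * Example (hedged sketch): the canonical miter loop, consuming a list one
 * mapped chunk at a time. 'process()' is a hypothetical callback; with
 * SG_MITER_ATOMIC it must not sleep while the mapping is held.
 *
 *        struct sg_mapping_iter miter;
 *
 *        sg_miter_start(&miter, sgl, nents,
 *                       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *        while (sg_miter_next(&miter))
 *                process(miter.addr, miter.length);
 *        sg_miter_stop(&miter);
 */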

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if (miter->__flags & SG_MITER_TO_SG)
                        flush_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 The linear buffer to copy to or from
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 * @to_buffer:           transfer direction (true == from an sg list to a
 *                       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy to
 * @buflen:              The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
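
/*
 * Example (hedged sketch): round-tripping a small header through an sg
 * list. 'struct my_hdr' is hypothetical.
 *
 *        struct my_hdr hdr;
 *        size_t n;
 *
 *        n = sg_copy_to_buffer(sgl, nents, &hdr, sizeof(hdr));
 *        if (n != sizeof(hdr))
 *                return -EIO;        // list was shorter than the header
 */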

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy from
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buf:                 Where to copy to
 * @buflen:              The number of bytes to copy
 * @skip:                Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:                 The SG list
 * @nents:               Number of SG entries
 * @buflen:              The number of bytes to zero out
 * @skip:                Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                       size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
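
/*
 * Example (hedged sketch): zeroing the tail of a buffer described by an sg
 * list, e.g. padding after 'valid' bytes of a 'total'-byte area. Both names
 * are hypothetical.
 *
 *        size_t cleared;
 *
 *        cleared = sg_zero_buffer(sgl, nents, total - valid, valid);
 *        WARN_ON(cleared != total - valid);
 */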