/* linux/mm/iov_iter.c */
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

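/*
 * Copy up to @bytes from @from into the user iovecs of @i, advancing the
 * iterator past whatever was copied.  Stops early if a destination page
 * cannot be written.  Returns the number of bytes actually copied.
 */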
static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

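/*
 * Counterpart of copy_to_iter_iovec(): copy up to @bytes from the user
 * iovecs of @i into @to, advancing the iterator.  Returns the number of
 * bytes actually copied (short on fault).
 */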
static size_t copy_from_iter_iovec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

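/*
 * Copy @bytes from @page (starting at @offset) to the user iovecs of @i.
 * Tries the fast path first: fault the destination in and copy from an
 * atomic kmap; if the atomic copy still comes up short, falls back to a
 * sleeping kmap() and plain __copy_to_user().
 */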
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

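/*
 * Mirror image of copy_page_to_iter_iovec(): copy @bytes from the user
 * iovecs of @i into @page at @offset, using an atomic kmap when the
 * source is already faulted in and a regular kmap() otherwise.
 */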
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

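/*
 * Clear up to @bytes of the user iovecs of @i and advance the iterator.
 * Returns the number of bytes actually zeroed (short on fault).
 */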
static size_t zero_iovec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        left = __clear_user(buf, copy);
        copy -= left;
        skip += copy;
        bytes -= copy;

        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __clear_user(buf, copy);
                copy -= left;
                skip = copy;
                bytes -= copy;
        }

        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

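/*
 * Walk @iov starting @base bytes in, copying @bytes into @vaddr with the
 * atomic (non-faulting) primitive.  Stops at the first partial copy and
 * returns the number of bytes that made it across.
 */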
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
{
        size_t copied = 0, left = 0;

        while (bytes) {
                char __user *buf = iov->iov_base + base;
                int copy = min(bytes, iov->iov_len - base);

                base = 0;
                left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
                iov++;

                if (unlikely(left))
                        break;
        }
        return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the
 * number of bytes which were copied before the fault.
 */
static size_t copy_from_user_atomic_iovec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t copied;

        kaddr = kmap_atomic(page);
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
                left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
                                                i->iov, i->iov_offset, bytes);
        }
        kunmap_atomic(kaddr);

        return copied;
}

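/*
 * Consume @bytes of @i: bump iov_offset in the single-segment case,
 * otherwise walk the iovec array, dropping fully-consumed segments
 * (including zero-length ones) from nr_segs.
 */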
static void advance_iovec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !iov->iov_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the iovec).
                 */
                while (bytes || unlikely(i->count && !iov->iov_len)) {
                        int copy;

                        copy = min(bytes, iov->iov_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
                                iov++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->iov = iov;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & ITER_BVEC)) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

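/*
 * OR together the addresses and lengths of every segment covered by
 * i->count; the result has a bit set for each alignment boundary any
 * segment violates, so callers can test it against a mask.
 */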
static unsigned long alignment_iovec(const struct iov_iter *i)
{
        const struct iovec *iov = i->iov;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = (unsigned long)iov->iov_base + i->iov_offset;
        n = iov->iov_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++iov)->iov_len) {
                res |= (unsigned long)iov->iov_base | iov->iov_len;
                size -= iov->iov_len;
        }
        res |= (unsigned long)iov->iov_base | size;
        return res;
}

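/*
 * Initialize @i over @iov/@nr_segs with @count bytes total.  A minimal
 * usage sketch for the iovec flavour (ubuf, len and kbuf are
 * hypothetical): describe one user segment, then drain kernel data
 * into it with copy_to_iter():
 *
 *      struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *      struct iov_iter iter;
 *
 *      iov_iter_init(&iter, READ, &iov, 1, len);
 *      copied = copy_to_iter(kbuf, len, &iter);
 */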
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS))
                direction |= ITER_KVEC;
        i->type = direction;
        i->iov = iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

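/*
 * Pin the user pages backing the current segment (up to @maxsize bytes
 * and @maxpages pages) with get_user_pages_fast().  *@start receives the
 * offset into the first page; the return value is the byte length
 * covered by the pinned pages, or a negative errno.
 */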
static ssize_t get_pages_iovec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        if (len > maxpages * PAGE_SIZE)
                len = maxpages * PAGE_SIZE;
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;
        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
        if (unlikely(res < 0))
                return res;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

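/*
 * Like get_pages_iovec(), but allocates the page array itself (kmalloc
 * with a vmalloc fallback); the caller frees it with kvfree().
 */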
static ssize_t get_pages_alloc_iovec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        size_t offset = i->iov_offset;
        const struct iovec *iov = i->iov;
        size_t len;
        unsigned long addr;
        void *p;
        int n;
        int res;

        len = iov->iov_len - offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        addr = (unsigned long)iov->iov_base + offset;
        len += *start = addr & (PAGE_SIZE - 1);
        addr &= ~(PAGE_SIZE - 1);
        n = (len + PAGE_SIZE - 1) / PAGE_SIZE;

        p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        if (!p)
                return -ENOMEM;

        res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
        if (unlikely(res < 0)) {
                kvfree(p);
                return res;
        }
        *pages = p;
        return (res == n ? len : res * PAGE_SIZE) - *start;
}

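/*
 * Count the pages spanned by the first i->count bytes of the iovec
 * array, clamped to @maxpages.  Empty segments are skipped.
 */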
static int iov_iter_npages_iovec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct iovec *iov = i->iov;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, iov++) {
                unsigned long addr = (unsigned long)iov->iov_base + offset;
                size_t len = iov->iov_len - offset;
                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages += (addr + len + PAGE_SIZE - 1) / PAGE_SIZE
                          - addr / PAGE_SIZE;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

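/* Trivial kmap_atomic() wrappers used by the bvec flavours below. */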
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

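/*
 * bvec counterpart of copy_to_iter_iovec().  The destination is kernel
 * pages, so memcpy cannot fault and the loop always drains @bytes.
 */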
static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
        skip += copy;
        from += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

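/*
 * bvec counterpart of copy_from_iter_iovec().  As with the to-iter case,
 * nothing here can fault, so all of @wanted is always copied.
 */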
static size_t copy_from_iter_bvec(void *to, size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;

        copy = min(bytes, bvec->bv_len - skip);

        memcpy_from_page(to, bvec->bv_page, bvec->bv_offset + skip, copy);

        to += copy;
        skip += copy;
        bytes -= copy;

        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted;
}

static size_t copy_page_to_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_to_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

static size_t copy_page_from_iter_bvec(struct page *page, size_t offset,
                                        size_t bytes, struct iov_iter *i)
{
        void *kaddr = kmap_atomic(page);
        size_t wanted = copy_from_iter_bvec(kaddr + offset, bytes, i);
        kunmap_atomic(kaddr);
        return wanted;
}

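/*
 * bvec counterpart of zero_iovec(): memset the covered pages through
 * kmap_atomic() and advance the iterator.
 */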
static size_t zero_bvec(size_t bytes, struct iov_iter *i)
{
        size_t skip, copy, wanted;
        const struct bio_vec *bvec;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        bvec = i->bvec;
        skip = i->iov_offset;
        copy = min_t(size_t, bytes, bvec->bv_len - skip);

        memzero_page(bvec->bv_page, skip + bvec->bv_offset, copy);
        skip += copy;
        bytes -= copy;
        while (bytes) {
                bvec++;
                copy = min(bytes, (size_t)bvec->bv_len);
                memzero_page(bvec->bv_page, bvec->bv_offset, copy);
                skip = copy;
                bytes -= copy;
        }
        if (skip == bvec->bv_len) {
                bvec++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= bvec - i->bvec;
        i->bvec = bvec;
        i->iov_offset = skip;
        return wanted - bytes;
}

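/*
 * Despite the name, the source here is kernel pages (the bvec), not
 * userspace; it backs iov_iter_copy_from_user_atomic() for ITER_BVEC
 * iterators.  Nothing can fault, so @bytes is always copied in full.
 */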
static size_t copy_from_user_bvec(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr;
        size_t left;
        const struct bio_vec *bvec;
        size_t base = i->iov_offset;

        kaddr = kmap_atomic(page);
        for (left = bytes, bvec = i->bvec; left; bvec++, base = 0) {
                size_t copy = min(left, bvec->bv_len - base);
                if (!bvec->bv_len)
                        continue;
                memcpy_from_page(kaddr + offset, bvec->bv_page,
                                 bvec->bv_offset + base, copy);
                offset += copy;
                left -= copy;
        }
        kunmap_atomic(kaddr);
        return bytes;
}

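/*
 * bvec counterpart of advance_iovec(): consume @bytes, skipping
 * zero-length segments and shrinking nr_segs as segments drain.
 */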
static void advance_bvec(struct iov_iter *i, size_t bytes)
{
        BUG_ON(i->count < bytes);

        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
                i->count -= bytes;
        } else {
                const struct bio_vec *bvec = i->bvec;
                size_t base = i->iov_offset;
                unsigned long nr_segs = i->nr_segs;

                /*
                 * The !bvec->bv_len check ensures we skip over unlikely
                 * zero-length segments (without overrunning the bvec).
                 */
                while (bytes || unlikely(i->count && !bvec->bv_len)) {
                        int copy;

                        copy = min(bytes, bvec->bv_len - base);
                        BUG_ON(!i->count || i->count < copy);
                        i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (bvec->bv_len == base) {
                                bvec++;
                                nr_segs--;
                                base = 0;
                        }
                }
                i->bvec = bvec;
                i->iov_offset = base;
                i->nr_segs = nr_segs;
        }
}

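/*
 * bvec counterpart of alignment_iovec(): OR together the offsets and
 * lengths of the covered segments for alignment checks.
 */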
static unsigned long alignment_bvec(const struct iov_iter *i)
{
        const struct bio_vec *bvec = i->bvec;
        unsigned long res;
        size_t size = i->count;
        size_t n;

        if (!size)
                return 0;

        res = bvec->bv_offset + i->iov_offset;
        n = bvec->bv_len - i->iov_offset;
        if (n >= size)
                return res | size;
        size -= n;
        res |= n;
        while (size > (++bvec)->bv_len) {
                res |= bvec->bv_offset | bvec->bv_len;
                size -= bvec->bv_len;
        }
        res |= bvec->bv_offset | size;
        return res;
}

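/*
 * The bvec page is already in the kernel; just take a reference on it.
 * Only the current segment is returned, hence at most one page.
 */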
static ssize_t get_pages_bvec(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        /* can't be more than PAGE_SIZE */
        *start = bvec->bv_offset + i->iov_offset;

        get_page(*pages = bvec->bv_page);

        return len;
}

static ssize_t get_pages_alloc_bvec(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        const struct bio_vec *bvec = i->bvec;
        size_t len = bvec->bv_len - i->iov_offset;
        if (len > i->count)
                len = i->count;
        if (len > maxsize)
                len = maxsize;
        *start = bvec->bv_offset + i->iov_offset;

        *pages = kmalloc(sizeof(struct page *), GFP_KERNEL);
        if (!*pages)
                return -ENOMEM;

        get_page(**pages = bvec->bv_page);

        return len;
}

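/*
 * Count bvec segments (one page each) covered by i->count, clamped to
 * @maxpages.  Empty segments are skipped.
 */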
static int iov_iter_npages_bvec(const struct iov_iter *i, int maxpages)
{
        size_t offset = i->iov_offset;
        size_t size = i->count;
        const struct bio_vec *bvec = i->bvec;
        int npages = 0;
        int n;

        for (n = 0; size && n < i->nr_segs; n++, bvec++) {
                size_t len = bvec->bv_len - offset;
                offset = 0;
                if (unlikely(!len))     /* empty segment */
                        continue;
                if (len > size)
                        len = size;
                npages++;
                if (npages >= maxpages) /* don't bother going further */
                        return maxpages;
                size -= len;
        }
        return min(npages, maxpages);
}

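/*
 * The exported entry points below just dispatch on ITER_BVEC to the
 * iovec or bvec flavour of each primitive.
 */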
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_to_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_page_from_iter_bvec(page, offset, bytes, i);
        else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_to_iter_bvec(addr, bytes, i);
        else
                return copy_to_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return copy_from_iter_bvec(addr, bytes, i);
        else
                return copy_from_iter_iovec(addr, bytes, i);
}
EXPORT_SYMBOL(copy_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return zero_bvec(bytes, i);
        else
                return zero_iovec(bytes, i);
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        if (i->type & ITER_BVEC)
                return copy_from_user_bvec(page, i, offset, bytes);
        else
                return copy_from_user_atomic_iovec(page, i, offset, bytes);
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (i->type & ITER_BVEC)
                advance_bvec(i, size);
        else
                advance_iovec(i, size);
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        if (i->type & ITER_BVEC)
                return alignment_bvec(i);
        else
                return alignment_iovec(i);
}
EXPORT_SYMBOL(iov_iter_alignment);

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_bvec(i, pages, maxsize, maxpages, start);
        else
                return get_pages_iovec(i, pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages);

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        if (i->type & ITER_BVEC)
                return get_pages_alloc_bvec(i, pages, maxsize, start);
        else
                return get_pages_alloc_iovec(i, pages, maxsize, start);
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        if (i->type & ITER_BVEC)
                return iov_iter_npages_bvec(i, maxpages);
        else
                return iov_iter_npages_iovec(i, maxpages);
}
EXPORT_SYMBOL(iov_iter_npages);