linux/lib/iov_iter.c
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>

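/*
 * The iterate_* helpers below walk one kind of segment array (user
 * iovecs, kernel kvecs or bio_vecs), running STEP on each contiguous
 * chunk, clipped to at most n bytes total.  For user iovecs, STEP
 * evaluates to the number of bytes it could NOT process (the tail left
 * behind by a faulting __copy_*_user()), which ends the walk early;
 * kvec and bvec steps cannot fail, so their value is ignored.
 */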
#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->bvec;                                  \
        __v.bv_len = min_t(size_t, n, __p->bv_len - skip);      \
        if (likely(__v.bv_len)) {                       \
                __v.bv_page = __p->bv_page;             \
                __v.bv_offset = __p->bv_offset + skip;  \
                (void)(STEP);                           \
                skip += __v.bv_len;                     \
                n -= __v.bv_len;                        \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.bv_len = min_t(size_t, n, __p->bv_len);     \
                if (unlikely(!__v.bv_len))              \
                        continue;                       \
                __v.bv_page = __p->bv_page;             \
                __v.bv_offset = __p->bv_offset;         \
                (void)(STEP);                           \
                skip = __v.bv_len;                      \
                n -= __v.bv_len;                        \
        }                                               \
        n = wanted;                                     \
}

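/*
 * Dispatch on the iterator type: I is the step expression run for each
 * user-space iovec chunk, B for each bio_vec chunk, K for each kernel
 * kvec chunk.  The iterator itself is left untouched.
 */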
#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                const struct bio_vec *bvec;                     \
                struct bio_vec v;                               \
                iterate_bvec(i, n, v, bvec, skip, (B))          \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
                iterate_kvec(i, n, v, kvec, skip, (K))          \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
        }                                                       \
}

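/*
 * Like iterate_all_kinds(), but afterwards consume what was walked:
 * decrement i->count, advance the segment pointer and nr_segs, and
 * store the intra-segment offset in i->iov_offset.
 */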
#define iterate_and_advance(i, n, v, I, B, K) {                 \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                const struct bio_vec *bvec;                     \
                struct bio_vec v;                               \
                iterate_bvec(i, n, v, bvec, skip, (B))          \
                if (skip == bvec->bv_len) {                     \
                        bvec++;                                 \
                        skip = 0;                               \
                }                                               \
                i->nr_segs -= bvec - i->bvec;                   \
                i->bvec = bvec;                                 \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
                iterate_kvec(i, n, v, kvec, skip, (K))          \
                if (skip == kvec->iov_len) {                    \
                        kvec++;                                 \
                        skip = 0;                               \
                }                                               \
                i->nr_segs -= kvec - i->kvec;                   \
                i->kvec = kvec;                                 \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
                if (skip == iov->iov_len) {                     \
                        iov++;                                  \
                        skip = 0;                               \
                }                                               \
                i->nr_segs -= iov - i->iov;                     \
                i->iov = iov;                                   \
        }                                                       \
        i->count -= n;                                          \
        i->iov_offset = skip;                                   \
}

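/*
 * Copy from a kernel page into a user-backed iov_iter.  Fast path:
 * fault the destination in up front and copy under kmap_atomic(); if
 * the atomic user copy still comes up short, drop to a sleeping kmap()
 * and plain __copy_to_user().  Advances the iterator and returns the
 * number of bytes copied.
 */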
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

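/*
 * Mirror image of copy_page_to_iter_iovec(): copy user data into a
 * kernel page, atomically when possible, falling back to kmap() when
 * the source still needs to be faulted in.
 */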
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (!fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */
        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);
done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (i.e. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                char __user *buf = i->iov->iov_base + i->iov_offset;
                bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                return fault_in_pages_readable(buf, bytes);
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_multipages_readable(v.iov_base,
                                        v.iov_len);
                        if (unlikely(err))
                                return err;
                        0;
                }))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
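
/*
 * Typical read-side use (an illustrative sketch, not code from this
 * file): wrap the user's iovec array, then copy page cache data out
 * through the iterator:
 *
 *        struct iov_iter iter;
 *
 *        iov_iter_init(&iter, READ, iov, nr_segs, total_len);
 *        copied = copy_page_to_iter(page, offset, len, &iter);
 *
 * The iterator remembers how far the copy got, so a short copy can be
 * retried or reported without recomputing the segment arrays.
 */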
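/* kmap_atomic()-based helpers for touching a piece of a kernel page */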
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);
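
/*
 * Write-side counterpart of the sketch above (again illustrative, not
 * code from this file): drain user data into a kernel buffer:
 *
 *        iov_iter_init(&iter, WRITE, iov, nr_segs, total_len);
 *        copied = copy_from_iter(kbuf, len, &iter);
 *
 * Both directions cap the transfer at i->count and return the number
 * of bytes actually copied, which may be short if user memory faults.
 */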

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

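/*
 * Fill a page from the iterator without advancing it: note the use of
 * iterate_all_kinds() rather than iterate_and_advance().  The caller
 * (e.g. a ->write_begin/->write_end loop) is expected to follow up
 * with iov_iter_advance() for however much was actually consumed.
 */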
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

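/*
 * OR together every segment's address (or page offset) and length; the
 * low bits of the result give the worst-case misalignment across the
 * whole iterator, so a caller can test it against a block-size mask.
 */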
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        );
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

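/*
 * Pin the pages backing the next chunk of the iterator: user pages are
 * grabbed with get_user_pages_fast(), a bio_vec page just gains an
 * extra reference; kernel kvecs are not supported (-EFAULT).  Returns
 * the number of bytes covered and stores the offset of the data within
 * the first page in *start; the iterator is not advanced.
 */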
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

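/*
 * Checksum-and-copy primitives for the networking code: copy like
 * copy_{from,to}_iter() while folding the transferred data into a
 * running Internet checksum with csum_block_add().
 */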
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        sum = *csum;
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

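/*
 * import_iovec() copies a user iovec array into kernel memory (reusing
 * the caller's fast_segs-sized array when it is big enough), validates
 * it and initializes the iterator over it.  A typical caller pattern
 * (an illustrative sketch, not code from this file):
 *
 *        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *        struct iov_iter iter;
 *        int ret;
 *
 *        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *                           &iov, &iter);
 *        if (ret < 0)
 *                return ret;
 *        ...
 *        kfree(iov);
 *
 * On return *iov is NULL whenever the stack array was used (and on
 * error), so the unconditional kfree() is always safe.
 */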
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);