linux/block/blk-map.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
        bool is_our_pages : 1;
        bool is_null_mapped : 1;
        struct iov_iter iter;
        struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd;

        if (data->nr_segs > UIO_MAXIOV)
                return NULL;

        bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
        if (!bmd)
                return NULL;
        memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
        bmd->iter = *data;
        bmd->iter.iov = bmd->iov;
        return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                ret = copy_page_from_iter(bvec->bv_page,
                                          bvec->bv_offset,
                                          bvec->bv_len,
                                          iter);

                if (!iov_iter_count(iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                ssize_t ret;

                ret = copy_page_to_iter(bvec->bv_page,
                                        bvec->bv_offset,
                                        bvec->bv_len,
                                        &iter);

                if (!iov_iter_count(&iter))
                        break;

                if (ret < bvec->bv_len)
                        return -EFAULT;
        }

        return 0;
}

/**
 *      bio_uncopy_user -       finish previously mapped bio
 *      @bio: bio being terminated
 *
 *      Free pages allocated from bio_copy_user_iov() and write back data
 *      to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bmd->is_null_mapped) {
                /*
                 * if we're in a workqueue, the request is orphaned, so
                 * don't copy into a random user address space, just free
                 * and return -EINTR so user space doesn't expect any data.
                 */
                if (!current->mm)
                        ret = -EINTR;
                else if (bio_data_dir(bio) == READ)
                        ret = bio_copy_to_iter(bio, bmd->iter);
                if (bmd->is_our_pages)
                        bio_free_pages(bio);
        }
        kfree(bmd);
        return ret;
}

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
                struct iov_iter *iter, gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct page *page;
        struct bio *bio;
        int i = 0, ret;
        int nr_pages;
        unsigned int len = iter->count;
        unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

        bmd = bio_alloc_map_data(iter, gfp_mask);
        if (!bmd)
                return -ENOMEM;

        /*
         * We need to do a deep copy of the iov_iter including the iovecs.
         * The caller provided iov might point to an on-stack or otherwise
         * shortlived one.
         */
        bmd->is_our_pages = !map_data;
        bmd->is_null_mapped = (map_data && map_data->null_mapped);

        nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

        ret = -ENOMEM;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;
        bio->bi_opf |= req_op(rq);

        if (map_data) {
                nr_pages = 1 << map_data->page_order;
                i = map_data->offset / PAGE_SIZE;
        }
        while (len) {
                unsigned int bytes = PAGE_SIZE;

                bytes -= offset;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries * nr_pages) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }

                        page = map_data->pages[i / nr_pages];
                        page += (i % nr_pages);

                        i++;
                } else {
                        page = alloc_page(GFP_NOIO | gfp_mask);
                        if (!page) {
                                ret = -ENOMEM;
                                goto cleanup;
                        }
                }

                if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
                        if (!map_data)
                                __free_page(page);
                        break;
                }

                len -= bytes;
                offset = 0;
        }

        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        /*
         * success
         */
        if ((iov_iter_rw(iter) == WRITE &&
             (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
        } else {
                if (bmd->is_our_pages)
                        zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }

        bio->bi_private = bmd;

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto cleanup;
        return 0;
cleanup:
        if (!map_data)
                bio_free_pages(bio);
        bio_put(bio);
out_bmd:
        kfree(bmd);
        return ret;
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
                gfp_t gfp_mask)
{
        unsigned int max_sectors = queue_max_hw_sectors(rq->q);
        struct bio *bio;
        int ret;
        int j;

        if (!iov_iter_count(iter))
                return -EINVAL;

        bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
        if (!bio)
                return -ENOMEM;
        bio->bi_opf |= req_op(rq);

        while (iov_iter_count(iter)) {
                struct page **pages;
                ssize_t bytes;
                size_t offs, added = 0;
                int npages;

                bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
                if (unlikely(bytes <= 0)) {
                        ret = bytes ? bytes : -EFAULT;
                        goto out_unmap;
                }

                npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

                if (unlikely(offs & queue_dma_alignment(rq->q))) {
                        ret = -EINVAL;
                        j = 0;
                } else {
                        for (j = 0; j < npages; j++) {
                                struct page *page = pages[j];
                                unsigned int n = PAGE_SIZE - offs;
                                bool same_page = false;

                                if (n > bytes)
                                        n = bytes;

                                if (!bio_add_hw_page(rq->q, bio, page, n, offs,
                                                     max_sectors, &same_page)) {
                                        if (same_page)
                                                put_page(page);
                                        break;
                                }

                                added += n;
                                bytes -= n;
                                offs = 0;
                        }
                        iov_iter_advance(iter, added);
                }
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < npages)
                        put_page(pages[j++]);
                kvfree(pages);
                /* couldn't stuff something into bio? */
                if (bytes)
                        break;
        }

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                goto out_unmap;
        return 0;

 out_unmap:
        bio_release_pages(bio, false);
        bio_put(bio);
        return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
        if (bio->bi_private && !op_is_write(bio_op(bio))) {
                unsigned long i, len = 0;

                for (i = 0; i < bio->bi_vcnt; i++)
                        len += bio->bi_io_vec[i].bv_len;
                invalidate_kernel_vmap_range(bio->bi_private, len);
        }
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
        bio_invalidate_vmalloc_pages(bio);
        bio_put(bio);
}

/**
 *      bio_map_kern    -       map kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to map
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio allocation
 *
 *      Map the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        bool is_vmalloc = is_vmalloc_addr(data);
        struct page *page;
        int offset, i;
        struct bio *bio;

        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        if (is_vmalloc) {
                flush_kernel_vmap_range(data, len);
                bio->bi_private = data;
        }

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (!is_vmalloc)
                        page = virt_to_page(data);
                else
                        page = vmalloc_to_page(data);
                if (bio_add_pc_page(q, bio, page, bytes,
                                    offset) < bytes) {
                        /* we don't support partial mappings */
                        bio_put(bio);
                        return ERR_PTR(-EINVAL);
                }

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
        bio_free_pages(bio);
        bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
        char *p = bio->bi_private;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                memcpy_from_bvec(p, bvec);
                p += bvec->bv_len;
        }

        bio_copy_kern_endio(bio);
}

/**
 *      bio_copy_kern   -       copy kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to copy
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio and page allocation
 *      @reading: data direction is READ
 *
 *      copy the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
                unsigned int len, gfp_t gfp_mask, int reading)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        struct bio *bio;
        void *p = data;
        int nr_pages = 0;

        /*
         * Overflow, abort
         */
        if (end < start)
                return ERR_PTR(-EINVAL);

        nr_pages = end - start;
        bio = bio_kmalloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        while (len) {
                struct page *page;
                unsigned int bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                page = alloc_page(GFP_NOIO | gfp_mask);
                if (!page)
                        goto cleanup;

                if (!reading)
                        memcpy(page_address(page), p, bytes);

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;

                len -= bytes;
                p += bytes;
        }

        if (reading) {
                bio->bi_end_io = bio_copy_kern_endio_read;
                bio->bi_private = data;
        } else {
                bio->bi_end_io = bio_copy_kern_endio;
        }

        return bio;

cleanup:
        bio_free_pages(bio);
        bio_put(bio);
        return ERR_PTR(-ENOMEM);
}

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        struct bvec_iter iter;
        struct bio_vec bv;
        unsigned int nr_segs = 0;

        bio_for_each_bvec(bv, bio, iter)
                nr_segs++;

        if (!rq->bio) {
                blk_rq_bio_prep(rq, bio, nr_segs);
        } else {
                if (!ll_back_merge_fn(rq, bio, nr_segs))
                        return -EINVAL;
                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += (bio)->bi_iter.bi_size;
                bio_crypt_free_ctx(bio);
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
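
/*
 * Illustrative sketch (not part of blk-map.c): how a passthrough driver
 * might build a single-page bio itself and attach it with
 * blk_rq_append_bio().  The function name, the GFP flags and the single
 * page are assumptions; only the block-layer calls mirror the code above.
 * Freeing the bio on request completion is the caller's job and elided.
 */
static int example_append_one_page(struct request *rq, struct page *page,
                                   unsigned int len, unsigned int offset)
{
        struct bio *bio;
        int ret;

        bio = bio_kmalloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;
        bio->bi_opf |= req_op(rq);

        /* the page must fit the queue limits, as in bio_copy_kern() above */
        if (bio_add_pc_page(rq->q, bio, page, len, offset) < len) {
                bio_put(bio);
                return -EINVAL;
        }

        ret = blk_rq_append_bio(rq, bio);
        if (ret)
                bio_put(bio);
        return ret;
}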

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:       iovec iterator
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret = -EINVAL;

        if (!iter_is_iovec(iter))
                goto fail;

        if (map_data)
                copy = true;
        else if (blk_queue_may_bounce(q))
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                if (copy)
                        ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
                else
                        ret = bio_map_user_iov(rq, &i, gfp_mask);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        return 0;

unmap_rq:
        blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
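
/*
 * Illustrative sketch (not from this file): mapping a user iovec array
 * onto an already-allocated passthrough request, roughly the pattern used
 * by SG_IO-style ioctls.  The wrapper name and variables are assumptions;
 * request setup, submission and blk_rq_unmap_user() are elided.
 */
static int example_map_user_iovec(struct request *rq,
                                  const struct iovec __user *uvec,
                                  unsigned int nr_segs)
{
        struct iovec *iov = NULL;
        struct iov_iter iter;
        int ret;

        /* copy the iovec array in and build an iov_iter over it */
        ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &iter);
        if (ret < 0)
                return ret;

        ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);

        /*
         * The mapping is now owned by the request (rq->bio); the iovec
         * copy made by import_iovec() is no longer needed.
         */
        kfree(iov);
        return ret;
}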

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
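
/*
 * Illustrative sketch (not from this file): the usual calling pattern
 * around blk_rq_map_user() for a single user buffer.  The wrapper name is
 * an assumption and the submission step is only hinted at; the point is
 * saving rq->bio before the request runs, because completion may advance
 * it and blk_rq_unmap_user() needs the original list head.
 */
static int example_user_passthrough(struct request_queue *q,
                                    struct request *rq,
                                    void __user *ubuf, unsigned long len)
{
        struct bio *bio = NULL;
        int ret = 0;

        if (ubuf && len) {
                ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
                if (ret)
                        return ret;
                bio = rq->bio;          /* keep the original bio list head */
        }

        /* ... issue the request here, e.g. via blk_execute_rq() ... */

        if (bio)
                ret = blk_rq_unmap_user(bio);
        return ret;
}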

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:               start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *next_bio;
        int ret = 0, ret2;

        while (bio) {
                if (bio->bi_private) {
                        ret2 = bio_uncopy_user(bio);
                        if (ret2 && !ret)
                                ret = ret2;
                } else {
                        bio_release_pages(bio, bio_data_dir(bio) == READ);
                }

                next_bio = bio;
                bio = bio->bi_next;
                bio_put(next_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of user data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
            blk_queue_may_bounce(q))
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        ret = blk_rq_append_bio(rq, bio);
        if (unlikely(ret))
                bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
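
/*
 * Illustrative sketch (not from this file): sending a kernel buffer with
 * blk_rq_map_kern().  The helper name and the kmemdup() copy are
 * assumptions; the point is that the buffer should be heap-allocated and
 * reasonably aligned, otherwise blk_rq_map_kern() falls back to a bounce
 * copy internally.  Submission and freeing after completion are elided.
 */
static int example_kernel_passthrough(struct request_queue *q,
                                      struct request *rq,
                                      const void *data, unsigned int len)
{
        void *buf;
        int ret;

        buf = kmemdup(data, len, GFP_KERNEL);   /* never map stack memory */
        if (!buf)
                return -ENOMEM;

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_NOIO);
        if (ret)
                kfree(buf);
        /* on success, free buf once the request has completed */
        return ret;
}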