/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
        blk_queue_bounce(rq->q, &bio);

        if (!rq->bio) {
                blk_rq_bio_prep(rq->q, rq, bio);
        } else {
                if (!ll_back_merge_fn(rq->q, rq, bio))
                        return -EINVAL;

                rq->biotail->bi_next = bio;
                rq->biotail = bio;
                rq->__data_len += bio->bi_iter.bi_size;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
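
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * has a fully built bio could attach it to a freshly allocated
 * passthrough request like this.  The request allocation, the op value
 * and the out_put_request label are assumptions about the caller:
 *
 *        struct request *rq;
 *        int err;
 *
 *        rq = blk_get_request(q, REQ_OP_SCSI_OUT, GFP_KERNEL);
 *        if (IS_ERR(rq))
 *                return PTR_ERR(rq);
 *
 *        err = blk_rq_append_bio(rq, bio);
 *        if (err)
 *                goto out_put_request;  // bio can't merge within driver limits
 */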

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
                struct rq_map_data *map_data, struct iov_iter *iter,
                gfp_t gfp_mask, bool copy)
{
        struct request_queue *q = rq->q;
        struct bio *bio, *orig_bio;
        int ret;

        if (copy)
                bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
        else
                bio = bio_map_user_iov(q, iter, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        if (map_data && map_data->null_mapped)
                bio_set_flag(bio, BIO_NULL_MAPPED);

        iov_iter_advance(iter, bio->bi_iter.bi_size);
        if (map_data)
                map_data->offset += bio->bi_iter.bi_size;

        orig_bio = bio;

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        ret = blk_rq_append_bio(rq, bio);
        bio_get(bio);
        if (ret) {
                bio_endio(bio);
                __blk_rq_unmap_user(orig_bio);
                bio_put(bio);
                return ret;
        }

        return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:       iovec iterator
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
{
        bool copy = false;
        unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret;

        if (!iter_is_iovec(iter))
                goto fail;

        if (map_data)
                copy = true;
        else if (iov_iter_alignment(iter) & align)
                copy = true;
        else if (queue_virt_boundary(q))
                copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

        i = *iter;
        do {
                ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
                if (ret)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
        } while (iov_iter_count(&i));

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->rq_flags |= RQF_COPY_USER;
        return 0;

unmap_rq:
        __blk_rq_unmap_user(bio);
fail:
        rq->bio = NULL;
        return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
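
/*
 * Example (illustrative sketch, assuming an SG_IO-style caller that is
 * handed an iovec array from userspace; @uvector and @nr_segs are
 * assumed names, not part of this file):
 *
 *        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *        struct iov_iter i;
 *        ssize_t err;
 *
 *        err = import_iovec(rq_data_dir(rq), uvector, nr_segs,
 *                           ARRAY_SIZE(iovstack), &iov, &i);
 *        if (err < 0)
 *                return err;
 *
 *        err = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 *        kfree(iov);  // import_iovec() may have allocated a bigger array
 */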

int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        struct iovec iov;
        struct iov_iter i;
        int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

        if (unlikely(ret < 0))
                return ret;

        return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:               start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
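
/*
 * Example (illustrative sketch): map a single user buffer, execute the
 * request, then unmap.  @ubuf and @len are assumed to come from an
 * ioctl handler; the original rq->bio is saved before execution since
 * completion may change rq->bio:
 *
 *        struct bio *bio;
 *        int err;
 *
 *        err = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *        if (err)
 *                goto out_put_request;
 *
 *        bio = rq->bio;  // remember the original bio
 *        blk_execute_rq(q, NULL, rq, 0);
 *        err = blk_rq_unmap_user(bio);
 */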

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        bio->bi_opf &= ~REQ_OP_MASK;
        bio->bi_opf |= req_op(rq);

        if (do_copy)
                rq->rq_flags |= RQF_COPY_USER;

        ret = blk_rq_append_bio(rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
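
/*
 * Example (illustrative sketch): issue a driver-internal command using
 * a kernel buffer.  Allocating from the heap rather than the stack lets
 * blk_rq_map_kern() take the zero-copy bio_map_kern() path; the request
 * setup around this snippet is assumed:
 *
 *        void *buf = kmalloc(len, GFP_KERNEL);  // on-stack buffers force a copy
 *        int err;
 *
 *        if (!buf)
 *                return -ENOMEM;
 *
 *        err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *        if (err)
 *                goto out_free;
 *
 *        blk_execute_rq(q, NULL, rq, 0);
 */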