/*
 * linux/block/blk-map.c
 *
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>            /* for struct sg_iovec */

#include "blk.h"

static bool iovec_gap_to_prv(struct request_queue *q,
                             struct sg_iovec *prv, struct sg_iovec *cur)
{
        unsigned long prev_end;

        if (!queue_virt_boundary(q))
                return false;

        if (prv->iov_base == NULL && prv->iov_len == 0)
                /* prv is not set - don't check */
                return false;

        prev_end = (unsigned long)(prv->iov_base + prv->iov_len);

        return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
                prev_end & queue_virt_boundary(q));
}
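
/*
 * Illustrative example (not part of the original file): for a queue whose
 * virt_boundary mask is PAGE_SIZE - 1 (e.g. 0xfff), two iovecs such as
 *
 *	prv = { .iov_base = (void *)0x1000, .iov_len = 0x800 }
 *	cur = { .iov_base = (void *)0x2000, .iov_len = 0x1000 }
 *
 * leave a gap: prv ends at 0x1800, and 0x1800 & 0xfff is non-zero, so
 * iovec_gap_to_prv() returns true and the mapping falls back to a copy.
 */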

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_size;
        }
        return 0;
}
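
/*
 * Illustrative sketch (not part of the original file): a second call to
 * blk_rq_append_bio() on the same request chains the new bio behind the
 * first, provided ll_back_merge_fn() accepts the merge:
 *
 *	ret = blk_rq_append_bio(q, rq, bio1);	-> rq->bio == bio1
 *	ret = blk_rq_append_bio(q, rq, bio2);	-> bio1->bi_next == bio2
 *
 * blk_rq_map_kern() below relies on this to append multiple buffers.
 */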

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, uaddr, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:       the user buffer
 * @len:        length of user data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It is the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;

        if (!ubuf && (!map_data || !map_data->null_mapped))
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
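
/*
 * Example usage (illustrative sketch, not from this file; it mirrors the
 * SG_IO path in block/scsi_ioctl.c): map a user buffer into a
 * REQ_TYPE_BLOCK_PC request, execute it, then unmap the original bio.
 *
 *	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *	struct bio *bio = NULL;
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	if (blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL))
 *		goto out;
 *	bio = rq->bio;			// save: completion may change rq->bio
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *	blk_rq_unmap_user(bio);		// must pass the original bio back
 * out:
 *	blk_put_request(rq);
 */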

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:        pointer to the iovec
 * @iov_count:  number of elements in the iovec
 * @len:        I/O byte count
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It is the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data, struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;
        struct sg_iovec prv = {.iov_base = NULL, .iov_len = 0};

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (!iov[i].iov_len)
                        return -EINVAL;

                /*
                 * Keep going so we check length of all segments
                 */
                if (uaddr & queue_dma_alignment(q) ||
                    iovec_gap_to_prv(q, &prv, &iov[i]))
                        unaligned = 1;

                prv.iov_base = iov[i].iov_base;
                prv.iov_len = iov[i].iov_len;
        }

        if (unaligned || (q->dma_pad_mask & len) || map_data)
                bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
                                        gfp_mask);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                /*
                 * Grab an extra reference to this bio, as bio_unmap_user()
                 * expects to be able to drop it twice as it happens on the
                 * normal IO completion path
                 */
                bio_get(bio);
                bio_endio(bio, 0);
                __blk_rq_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
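
/*
 * Example usage (illustrative sketch, not from this file): scatter a
 * passthrough transfer across two user buffers with a single request.
 *
 *	struct sg_iovec iov[2] = {
 *		{ .iov_base = ubuf1, .iov_len = len1 },
 *		{ .iov_base = ubuf2, .iov_len = len2 },
 *	};
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, iov, 2, len1 + len2,
 *				  GFP_KERNEL);
 *
 * If any segment is misaligned or leaves a virt_boundary gap, the data
 * is silently staged through a bounce buffer instead of being mapped
 * directly.
 */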

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:        start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        int reading = rq_data_dir(rq) == READ;
        unsigned long addr = (unsigned long) kbuf;
        int do_copy = 0;
        struct bio *bio;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (!reading)
                bio->bi_rw |= REQ_WRITE;

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        ret = blk_rq_append_bio(q, rq, bio);
        if (unlikely(ret)) {
                /* request is too big */
                bio_put(bio);
                return ret;
        }

        blk_queue_bounce(q, &rq->bio);
        rq->buffer = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
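
/*
 * Example usage (illustrative sketch, not from this file): issue a
 * passthrough command with a kmalloc'ed data buffer, much as SCSI
 * helpers such as scsi_execute() do.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 *	kfree(buf);
 *
 * Because the buffer is kmalloc'ed (not on the stack) and typically
 * aligned, the direct bio_map_kern() path is taken and no copy occurs.
 */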