linux/block/blk-merge.c
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

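/*
 * Walk every bio_vec in the bio chain and count the physical segments it
 * maps to, honoring the queue's clustering, max segment size and segment
 * boundary limits.  The sizes of the first and last segments are cached
 * in bi_seg_front_size/bi_seg_back_size so later merge decisions can
 * tell whether joining two requests would create an oversized segment.
 */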
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;

        if (!bio)
                return 0;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

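/* Recompute the cached physical segment count for a whole request. */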
void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

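/*
 * Recount the physical segments of a single bio.  The bio is temporarily
 * unlinked from its chain so __blk_recalc_rq_segments() sees only this
 * bio, and BIO_SEG_VALID is set so the count is not redone until the
 * bio changes.
 */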
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

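/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * may be folded into one physical segment without violating the queue's
 * segment size and boundary limits.  Returns 1 if they may, 0 otherwise.
 */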
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
                return 1;

        return 0;
}

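/*
 * Map a single bio_vec into the scatterlist: extend the current sg entry
 * when clustering allows it, otherwise start a new entry.  Shared by
 * blk_rq_map_sg() and blk_bio_map_sg().
 */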
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec **bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*bvprv && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = bvec;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries
 * set up.  The caller must make sure sg can hold rq->nr_phys_segments
 * entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        /*
         * for each bio in rq
         */
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

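        /*
         * Queues that need a DMA drain get one extra sg entry appended
         * that points at the preallocated drain buffer; for writes the
         * buffer is zeroed first so stale data is never sent out.
         */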
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries set up
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct scatterlist *sg;
        int nsegs, cluster;
        unsigned long i;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        bvprv = NULL;
        sg = NULL;
        bio_for_each_segment(bvec, bio, i) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */

        if (sg)
                sg_mark_end(sg);

        BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
        return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

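/*
 * Account for the physical segments a new bio adds to @req, refusing the
 * merge if that would exceed the queue's segment limit.
 */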
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
                goto no_merge;

        /*
         * The appended bio forms the start of a new segment; bump the
         * request's physical segment count.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

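/*
 * Check whether @bio can be appended to the tail of @req without
 * exceeding the queue's size and segment limits.
 */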
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

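/*
 * Check whether @bio can be prepended to the head of @req without
 * exceeding the queue's size and segment limits.
 */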
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

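/*
 * Decide whether two adjacent requests may be merged into one, updating
 * the merged request's segment count and cached front/back segment sizes
 * when they may.
 */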
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check whether either of the requests is a re-queued
         * request.  We can't merge them if so.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

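/*
 * A request is going away because it was merged into another one; close
 * out its in-flight accounting on the partition it targeted.
 */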
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                          struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts; update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or either of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

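/* Try to merge @rq with the request the elevator says follows it. */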
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

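/* Try to merge @rq with the request the elevator says precedes it. */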
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

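/* Wrapper around attempt_merge() for callers outside this file. */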
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

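/*
 * Basic eligibility checks for merging @bio into @rq: mergeable flags,
 * matching data direction, same device, no prepared special command,
 * matching integrity protection and (for WRITE SAME) the same buffer.
 */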
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

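/*
 * Classify how @bio lines up with @rq: immediately after it (back merge),
 * immediately before it (front merge), or not adjacent at all.
 */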
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}