linux/block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

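/*
 * Split a discard bio so that it obeys the queue's discard limits:
 * clamp it to max_discard_sectors (rounded down to the discard
 * granularity) and end the front piece on a granularity-aligned
 * sector.  Sets *nsegs to 1 and returns the front piece to submit
 * first, or NULL if the bio already fits (or the limit is unset).
 */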
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

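/*
 * Split a write-zeroes bio that exceeds max_write_zeroes_sectors.
 * Returns the front piece, or NULL if no split is needed (or the
 * limit is zero).
 */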
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

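/*
 * Split a write-same bio that exceeds max_write_same_sectors.
 * Returns the front piece, or NULL if no split is needed (or the
 * limit is zero).
 */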
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

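/*
 * Maximum number of sectors we can take from @bio at its current
 * starting sector: the offset-dependent queue limit, rounded down so
 * the result stays aligned to the logical block size.
 */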
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}

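/*
 * Walk the bio's segments and decide where, if anywhere, it has to be
 * split so that it fits the queue's max_sectors, max_segments,
 * max_segment_size and SG-gap constraints.  *segs is set to the number
 * of physical segments in the resulting front piece.  Returns the
 * front piece to submit first, or NULL if no split is needed.
 */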
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < max_sectors) {
                                nsegs++;
                                sectors = max_sectors;
                        }
                        goto split;
                }

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;

        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        if (nsegs == 1 && seg_size > front_seg_size)
                front_seg_size = seg_size;
        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

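/**
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   request queue the bio is being submitted to
 * @bio: bio to check; on return it points to the front piece that fits
 *       within the limits
 *
 * Description:
 *     Split @bio according to the operation-specific limits (discard,
 *     secure erase, write zeroes, write same, or regular read/write).
 *     If a split is needed, the front piece is chained to the
 *     remainder, the remainder is resubmitted with
 *     generic_make_request(), and @bio is updated to point at the
 *     front piece.  The physical segment count computed during
 *     splitting is cached in the resulting bio.
 */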
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
        struct bio *split, *res;
        unsigned nsegs;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
                break;
        }

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

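/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering, segment-size and segment-boundary limits, and record the
 * front/back segment sizes that later merge decisions rely on.
 */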
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

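/*
 * Recompute a single bio's physical segment count, cache it in
 * bio->bi_phys_segments and mark the bio BIO_SEG_VALID.
 */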
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

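/*
 * Return 1 if the last segment of @bio and the first segment of @nxt
 * are physically contiguous and can be merged into one segment without
 * violating the queue's segment size and boundary limits.
 */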
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_get_last_bvec(bio, &end_bv);
        bio_get_first_bvec(nxt, &nxt_bv);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

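/*
 * Add one bio_vec to the scatterlist being built: either extend the
 * current sg entry, if the queue allows clustering and no limit would
 * be exceeded, or start a new entry.
 */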
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{

        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = sglist;
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int cluster = blk_queue_cluster(q), nsegs = 0;

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries
 * set up.  The caller must make sure sg can hold rq->nr_phys_segments
 * entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the computed number of
         * segments is bigger than the request's number of physical
         * segments.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

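/*
 * Account for the physical segments @bio adds when merged into @req.
 * Returns 1 if the merge stays within queue_max_segments() and the
 * integrity payloads are compatible; otherwise marks the request
 * un-mergeable and returns 0.
 */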
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(q, req);
        return 0;
}

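/*
 * ll_back_merge_fn/ll_front_merge_fn check whether @bio may be appended
 * to or prepended to @req without violating the gap, integrity and
 * max-sectors limits, and update the request's segment accounting if
 * the merge is allowed.
 */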
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{

        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

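/*
 * Check whether two discard requests can be merged into one multi-range
 * discard without exceeding queue_max_discard_segments() or the request
 * size limit, and update the segment count if they can.
 */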
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

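/*
 * Check whether @req and @next can be combined into a single request
 * under the queue's size, segment-count and integrity limits; on
 * success the merged physical segment count is written to @req.
 */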
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check whether either of the requests is a re-queued
         * request; they can't be merged if so.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

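/*
 * A request is going away because it was merged into another one:
 * update the partition in-flight count and round off its stats.
 */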
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(req->q, cpu, part);
                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!q->mq_ops)
                lockdep_assert_held(q->queue_lock);

        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated the segment counts; update sector
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */
        if (req_op(req) == REQ_OP_DISCARD) {
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
        } else if (!ll_merge_requests_fn(q, req, next))
                return NULL;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (req_op(req) != REQ_OP_DISCARD)
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /*
         * ownership of bio passed from next to req, return 'next' for
         * the caller to free
         */
        next->bio = NULL;
        return next;
}

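/*
 * attempt_back_merge/attempt_front_merge ask the elevator for the
 * request that follows/precedes @rq and try to merge the two; the
 * request that was absorbed is returned for the caller to free.
 */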
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

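/*
 * Try to merge @next into @rq after asking the I/O scheduler for
 * permission; on success the absorbed request is freed and 1 is
 * returned.
 */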
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        struct elevator_queue *e = q->elevator;
        struct request *free;

        if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
                if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
                        return 0;

        free = attempt_merge(q, rq, next);
        if (free) {
                __blk_put_request(q, free);
                return 1;
        }

        return 0;
}

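/*
 * Quick checks for whether @bio may be merged into @rq at all: same
 * operation, data direction, device and write hint, plus compatible
 * integrity and write-same payloads.
 */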
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        return true;
}

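/*
 * Decide which kind of merge @bio is a candidate for with respect to
 * @rq: a discard range merge, a back merge (the bio starts right after
 * @rq ends) or a front merge (the bio ends right where @rq starts).
 */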
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (req_op(rq) == REQ_OP_DISCARD &&
            queue_max_discard_segments(rq->q) > 1)
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}