linux/block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

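/*
 * Check whether appending @next after @prev (the last bio of @prev_rq, if
 * given) would create a gap with respect to the queue's virt boundary mask.
 */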
static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the 1st bio starts with non-zero offset, otherwise it
         * is quite difficult to respect the sg gap limit.  We work hard to
         * merge a huge number of small single bios in case of mkfs.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the situation that the merged segment
         * ends in an unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must include a single
         *   bvec ('nb'), otherwise 'nb' can't be merged with 'pb'
         */
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (biovec_phys_mergeable(q, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}

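/*
 * Split a discard/secure erase bio so that it fits within the queue's
 * max_discard_sectors limit and the next starting sector stays aligned to
 * the discard granularity.  Returns the split-off front part, or NULL if no
 * split is needed (or discards are not supported).
 */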
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

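/*
 * Return the maximum number of sectors a bio starting at this sector may
 * cover, rounded down to the logical block size.
 */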
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}

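/*
 * Return the largest segment that can start at @offset without crossing the
 * queue's segment boundary or exceeding its maximum segment size.
 */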
static unsigned get_max_segment_size(struct request_queue *q,
                                     unsigned offset)
{
        unsigned long mask = queue_segment_boundary(q);

        /* default segment boundary mask means no boundary limit */
        if (mask == BLK_SEG_BOUNDARY_MASK)
                return queue_max_segment_size(q);

        return min_t(unsigned long, mask - (mask & offset) + 1,
                     queue_max_segment_size(q));
}

/*
 * Split the bvec @bv into segments, and update the segment count (*nsegs)
 * and sector count (*sectors) accordingly.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
                unsigned *nsegs, unsigned *sectors, unsigned max_segs)
{
        unsigned len = bv->bv_len;
        unsigned total_len = 0;
        unsigned new_nsegs = 0, seg_size = 0;

        /*
         * A multi-page bvec may be too big to fit in one segment, so the
         * current bvec may have to be split into multiple segments.
         */
        while (len && new_nsegs + *nsegs < max_segs) {
                seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
                seg_size = min(seg_size, len);

                new_nsegs++;
                total_len += seg_size;
                len -= seg_size;

                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
                        break;
        }

        if (new_nsegs) {
                *nsegs += new_nsegs;
                if (sectors)
                        *sectors += total_len >> 9;
        }

        /* split in the middle of the bvec if len != 0 */
        return !!len;
}

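/*
 * Walk @bio's bvecs and split the bio at the first point where it would
 * exceed the queue's size, segment-count or SG-gap limits.  Returns the
 * split-off front part (or NULL if no split is needed) and the number of
 * physical segments of that part via @segs.
 */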
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned nsegs = 0, sectors = 0;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);
        const unsigned max_segs = queue_max_segments(q);

        bio_for_each_bvec(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < max_segs &&
                            sectors < max_sectors) {
                                /* split in the middle of bvec */
                                bv.bv_len = (max_sectors - sectors) << 9;
                                bvec_split_segs(q, &bv, &nsegs,
                                                &sectors, max_segs);
                        }
                        goto split;
                }

                if (nsegs == max_segs)
                        goto split;

                bvprv = bv;
                bvprvp = &bvprv;

                if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;
                        sectors += bv.bv_len >> 9;
                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
                                max_segs)) {
                        goto split;
                }
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        return do_split ? new : NULL;
}

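/**
 * blk_queue_split - split a bio to fit the queue limits and submit the rest
 * @q:   request queue the bio is targeted at
 * @bio: in/out: bio to split; on return it points to the part that fits
 *
 * If *@bio exceeds the queue limits it is split, the remainder is chained to
 * the front part and resubmitted via generic_make_request(), and *@bio is
 * updated to point at the part that can be processed now.
 */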
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
        struct bio *split, *res;
        unsigned nsegs;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
                break;
        }

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                /*
                 * Since we're recursing into make_request here, ensure
                 * that we mark this bio as already having entered the queue.
                 * If not, and the queue is going away, we can get stuck
                 * forever waiting for the queue reference to drop. But
                 * that will never happen, as we're already holding a
                 * reference to it.
                 */
                bio_set_flag(*bio, BIO_QUEUE_ENTERED);

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

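/*
 * Count the physical segments in a (chain of) bio(s), honouring the queue's
 * segment size and boundary limits.
 */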
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        unsigned int nr_phys_segs = 0;
        struct bvec_iter iter;
        struct bio_vec bv;

        if (!bio)
                return 0;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        for_each_bio(bio) {
                bio_for_each_bvec(bv, bio, iter)
                        bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
        }

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;

        bio_set_flag(bio, BIO_SEG_VALID);
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
{
        if (!*sg)
                return sglist;

        /*
         * If the driver previously mapped a shorter list, we could see a
         * termination bit prematurely unless it fully inits the sg table
         * on each mapping. We KNOW that there must be more entries here
         * or the driver would be buggy, so force clear the termination bit
         * to avoid doing a full sg_init_table() in drivers for each command.
         */
        sg_unmark_end(*sg);
        return sg_next(*sg);
}

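/*
 * Map a single (possibly multi-page) bvec to one or more scatterlist
 * entries, splitting it whenever the queue's segment size or boundary
 * limits would be exceeded.  Returns the number of entries used.
 */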
static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
{
        unsigned nbytes = bvec->bv_len;
        unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
                unsigned len = min(get_max_segment_size(q, offset), nbytes);
                struct page *page = bvec->bv_page;

                /*
                 * Unfortunately a fair number of drivers barf on scatterlists
                 * that have an offset larger than PAGE_SIZE, despite other
                 * subsystems dealing with that invariant just fine.  For now
                 * stick to the legacy format where we never present those from
                 * the block layer, but the code below should be removed once
                 * these offenders (mostly MMC/SD drivers) are fixed.
                 */
                page += (offset >> PAGE_SHIFT);
                offset &= ~PAGE_MASK;

                *sg = blk_next_sg(sg, sglist);
                sg_set_page(*sg, page, len, offset);

                total += len;
                nbytes -= len;
                nsegs++;
        }

        return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = blk_next_sg(sg, sglist);
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
                           struct bio_vec *bvprv, struct scatterlist **sg)
{
        int nbytes = bvec->bv_len;

        if (!*sg)
                return false;

        if ((*sg)->length + nbytes > queue_max_segment_size(q))
                return false;

        if (!biovec_phys_mergeable(q, bvprv, bvec))
                return false;

        (*sg)->length += nbytes;

        return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
        bool new_bio = false;

        for_each_bio(bio) {
                bio_for_each_bvec(bvec, bio, iter) {
                        /*
                         * Only try to merge bvecs from two different bios,
                         * since bio-internal merging was already done when
                         * pages were added to the bio.
                         */
                        if (new_bio &&
                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
                                goto next_bvec;

                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
                        else
                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
                        new_bio = false;
                }
                if (likely(bio->bi_iter.bi_size)) {
                        bvprv = bvec;
                        new_bio = true;
                }
        }

        return nsegs;
}

/*
 * Map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the computed number of
         * segments is bigger than the request's physical segment count.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

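/*
 * Account the bio's physical segments to @req if the merge stays within the
 * queue's segment limit and passes the integrity merge check; otherwise mark
 * the request as not mergeable.
 */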
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump the
         * physical segment count accordingly.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(q, req);
        return 0;
}

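/* Check whether @bio can be merged at the back (tail) of @req. */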
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

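/* Check whether @bio can be merged at the front (head) of @req. */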
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

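/*
 * Try to merge two discard requests when the driver supports multi-range
 * discards; each bio is counted as one discard segment.
 */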
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;

                part_stat_lock();
                part = req->part;

                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests are handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to rq and release next.  merge_requests_fn will have updated the
         * segment counts; update the sector counts here.  Handle DISCARDs
         * separately, as they have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        /*
         * ownership of bio passed from next to req, return 'next' for
         * the caller to free
         */
        next->bio = NULL;
        return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        struct request *free;

        free = attempt_merge(q, rq, next);
        if (free) {
                blk_put_request(free);
                return 1;
        }

        return 0;
}

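/*
 * Check whether @bio is allowed to be merged into @rq at all: the op, data
 * direction, device, integrity profile, write hint and I/O priority must
 * all be compatible.
 */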
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device */
        if (rq->rq_disk != bio->bi_disk)
                return false;

        /* only merge an integrity-protected bio into a likewise protected rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        if (rq->ioprio != bio_prio(bio))
                return false;

        return true;
}

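/*
 * Decide what kind of merge (discard, back or front) is possible between
 * @rq and @bio based on their positions on the device.
 */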
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}