linux/block/blk-merge.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions related to segment and merge handling
   4 */
   5#include <linux/kernel.h>
   6#include <linux/module.h>
   7#include <linux/bio.h>
   8#include <linux/blkdev.h>
   9#include <linux/scatterlist.h>
  10
  11#include <trace/events/block.h>
  12
  13#include "blk.h"
  14#include "blk-rq-qos.h"
  15
  16static inline bool bio_will_gap(struct request_queue *q,
  17                struct request *prev_rq, struct bio *prev, struct bio *next)
  18{
  19        struct bio_vec pb, nb;
  20
  21        if (!bio_has_data(prev) || !queue_virt_boundary(q))
  22                return false;
  23
  24        /*
  25         * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  26         * is quite difficult to respect the sg gap limit.  We work hard to
  27         * merge a huge number of small single bios in case of mkfs.
  28         */
  29        if (prev_rq)
  30                bio_get_first_bvec(prev_rq->bio, &pb);
  31        else
  32                bio_get_first_bvec(prev, &pb);
  33        if (pb.bv_offset & queue_virt_boundary(q))
  34                return true;
  35
  36        /*
  37         * We don't need to worry about the situation that the merged segment
  38         * ends in unaligned virt boundary:
  39         *
  40         * - if 'pb' ends aligned, the merged segment ends aligned
  41         * - if 'pb' ends unaligned, the next bio must include
  42         *   one single bvec of 'nb', otherwise the 'nb' can't
  43         *   merge with 'pb'
  44         */
  45        bio_get_last_bvec(prev, &pb);
  46        bio_get_first_bvec(next, &nb);
  47        if (biovec_phys_mergeable(q, &pb, &nb))
  48                return false;
  49        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
  50}
  51
  52static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  53{
  54        return bio_will_gap(req->q, req, req->biotail, bio);
  55}
  56
  57static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  58{
  59        return bio_will_gap(req->q, NULL, bio, req->bio);
  60}
  61
  62static struct bio *blk_bio_discard_split(struct request_queue *q,
  63                                         struct bio *bio,
  64                                         struct bio_set *bs,
  65                                         unsigned *nsegs)
  66{
  67        unsigned int max_discard_sectors, granularity;
  68        int alignment;
  69        sector_t tmp;
  70        unsigned split_sectors;
  71
  72        *nsegs = 1;
  73
  74        /* Zero-sector (unknown) and one-sector granularities are the same.  */
  75        granularity = max(q->limits.discard_granularity >> 9, 1U);
  76
  77        max_discard_sectors = min(q->limits.max_discard_sectors,
  78                        bio_allowed_max_sectors(q));
  79        max_discard_sectors -= max_discard_sectors % granularity;
  80
  81        if (unlikely(!max_discard_sectors)) {
  82                /* XXX: warn */
  83                return NULL;
  84        }
  85
  86        if (bio_sectors(bio) <= max_discard_sectors)
  87                return NULL;
  88
  89        split_sectors = max_discard_sectors;
  90
  91        /*
  92         * If the next starting sector would be misaligned, stop the discard at
  93         * the previous aligned sector.
  94         */
  95        alignment = (q->limits.discard_alignment >> 9) % granularity;
  96
  97        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  98        tmp = sector_div(tmp, granularity);
  99
 100        if (split_sectors > tmp)
 101                split_sectors -= tmp;
 102
 103        return bio_split(bio, split_sectors, GFP_NOIO, bs);
 104}
 105
 106static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
 107                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
 108{
 109        *nsegs = 0;
 110
 111        if (!q->limits.max_write_zeroes_sectors)
 112                return NULL;
 113
 114        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
 115                return NULL;
 116
 117        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
 118}
 119
 120static struct bio *blk_bio_write_same_split(struct request_queue *q,
 121                                            struct bio *bio,
 122                                            struct bio_set *bs,
 123                                            unsigned *nsegs)
 124{
 125        *nsegs = 1;
 126
 127        if (!q->limits.max_write_same_sectors)
 128                return NULL;
 129
 130        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
 131                return NULL;
 132
 133        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 134}
 135
 136/*
 137 * Return the maximum number of sectors from the start of a bio that may be
 138 * submitted as a single request to a block device. If enough sectors remain,
 139 * align the end to the physical block size. Otherwise align the end to the
 140 * logical block size. This approach minimizes the number of non-aligned
 141 * requests that are submitted to a block device if the start of a bio is not
 142 * aligned to a physical block boundary.
 143 */
 144static inline unsigned get_max_io_size(struct request_queue *q,
 145                                       struct bio *bio)
 146{
 147        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
 148        unsigned max_sectors = sectors;
 149        unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
 150        unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
 151        unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
 152
 153        max_sectors += start_offset;
 154        max_sectors &= ~(pbs - 1);
 155        if (max_sectors > start_offset)
 156                return max_sectors - start_offset;
 157
 158        return sectors & ~(lbs - 1);
 159}
 160
 161static inline unsigned get_max_segment_size(const struct request_queue *q,
 162                                            struct page *start_page,
 163                                            unsigned long offset)
 164{
 165        unsigned long mask = queue_segment_boundary(q);
 166
 167        offset = mask & (page_to_phys(start_page) + offset);
 168
 169        /*
 170         * overflow may be triggered in case of zero page physical address
 171         * on 32bit arch, use queue's max segment size when that happens.
 172         */
 173        return min_not_zero(mask - offset + 1,
 174                        (unsigned long)queue_max_segment_size(q));
 175}
 176
 177/**
 178 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 179 * @q:        [in] request queue associated with the bio associated with @bv
 180 * @bv:       [in] bvec to examine
 181 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 182 *            by the number of segments from @bv that may be appended to that
 183 *            bio without exceeding @max_segs
 184 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 185 *            by the number of sectors from @bv that may be appended to that
 186 *            bio without exceeding @max_sectors
 187 * @max_segs: [in] upper bound for *@nsegs
 188 * @max_sectors: [in] upper bound for *@sectors
 189 *
 190 * When splitting a bio, it can happen that a bvec is encountered that is too
 191 * big to fit in a single segment and hence that it has to be split in the
 192 * middle. This function verifies whether or not that should happen. The value
 193 * %true is returned if and only if appending the entire @bv to a bio with
 194 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 195 * the block driver.
 196 */
 197static bool bvec_split_segs(const struct request_queue *q,
 198                            const struct bio_vec *bv, unsigned *nsegs,
 199                            unsigned *sectors, unsigned max_segs,
 200                            unsigned max_sectors)
 201{
 202        unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
 203        unsigned len = min(bv->bv_len, max_len);
 204        unsigned total_len = 0;
 205        unsigned seg_size = 0;
 206
 207        while (len && *nsegs < max_segs) {
 208                seg_size = get_max_segment_size(q, bv->bv_page,
 209                                                bv->bv_offset + total_len);
 210                seg_size = min(seg_size, len);
 211
 212                (*nsegs)++;
 213                total_len += seg_size;
 214                len -= seg_size;
 215
 216                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
 217                        break;
 218        }
 219
 220        *sectors += total_len >> 9;
 221
 222        /* tell the caller to split the bvec if it is too big to fit */
 223        return len > 0 || bv->bv_len > max_len;
 224}
 225
 226/**
 227 * blk_bio_segment_split - split a bio in two bios
 228 * @q:    [in] request queue pointer
 229 * @bio:  [in] bio to be split
 230 * @bs:   [in] bio set to allocate the clone from
 231 * @segs: [out] number of segments in the bio with the first half of the sectors
 232 *
 233 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 234 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 235 * following is guaranteed for the cloned bio:
 236 * - That it has at most get_max_io_size(@q, @bio) sectors.
 237 * - That it has at most queue_max_segments(@q) segments.
 238 *
 239 * Except for discard requests the cloned bio will point at the bi_io_vec of
 240 * the original bio. It is the responsibility of the caller to ensure that the
 241 * original bio is not freed before the cloned bio. The caller is also
 242 * responsible for ensuring that @bs is only destroyed after processing of the
 243 * split bio has finished.
 244 */
 245static struct bio *blk_bio_segment_split(struct request_queue *q,
 246                                         struct bio *bio,
 247                                         struct bio_set *bs,
 248                                         unsigned *segs)
 249{
 250        struct bio_vec bv, bvprv, *bvprvp = NULL;
 251        struct bvec_iter iter;
 252        unsigned nsegs = 0, sectors = 0;
 253        const unsigned max_sectors = get_max_io_size(q, bio);
 254        const unsigned max_segs = queue_max_segments(q);
 255
 256        bio_for_each_bvec(bv, bio, iter) {
 257                /*
 258                 * If the queue doesn't support SG gaps and adding this
 259                 * offset would create a gap, disallow it.
 260                 */
 261                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
 262                        goto split;
 263
 264                if (nsegs < max_segs &&
 265                    sectors + (bv.bv_len >> 9) <= max_sectors &&
 266                    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
 267                        nsegs++;
 268                        sectors += bv.bv_len >> 9;
 269                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
 270                                         max_sectors)) {
 271                        goto split;
 272                }
 273
 274                bvprv = bv;
 275                bvprvp = &bvprv;
 276        }
 277
 278        *segs = nsegs;
 279        return NULL;
 280split:
 281        *segs = nsegs;
 282
 283        /*
 284         * Bio splitting may cause subtle trouble such as hang when doing sync
 285         * iopoll in direct IO routine. Given performance gain of iopoll for
 286         * big IO can be trival, disable iopoll when split needed.
 287         */
 288        bio_clear_hipri(bio);
 289
 290        return bio_split(bio, sectors, GFP_NOIO, bs);
 291}
 292
 293/**
 294 * __blk_queue_split - split a bio and submit the second half
 295 * @bio:     [in, out] bio to be split
 296 * @nr_segs: [out] number of segments in the first bio
 297 *
 298 * Split a bio into two bios, chain the two bios, submit the second half and
 299 * store a pointer to the first half in *@bio. If the second bio is still too
 300 * big it will be split by a recursive call to this function. Since this
 301 * function may allocate a new bio from q->bio_split, it is the responsibility
 302 * of the caller to ensure that q->bio_split is only released after processing
 303 * of the split bio has finished.
 304 */
 305void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 306{
 307        struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
 308        struct bio *split = NULL;
 309
 310        switch (bio_op(*bio)) {
 311        case REQ_OP_DISCARD:
 312        case REQ_OP_SECURE_ERASE:
 313                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
 314                break;
 315        case REQ_OP_WRITE_ZEROES:
 316                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
 317                                nr_segs);
 318                break;
 319        case REQ_OP_WRITE_SAME:
 320                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
 321                                nr_segs);
 322                break;
 323        default:
 324                /*
 325                 * All drivers must accept single-segments bios that are <=
 326                 * PAGE_SIZE.  This is a quick and dirty check that relies on
 327                 * the fact that bi_io_vec[0] is always valid if a bio has data.
 328                 * The check might lead to occasional false negatives when bios
 329                 * are cloned, but compared to the performance impact of cloned
 330                 * bios themselves the loop below doesn't matter anyway.
 331                 */
 332                if (!q->limits.chunk_sectors &&
 333                    (*bio)->bi_vcnt == 1 &&
 334                    ((*bio)->bi_io_vec[0].bv_len +
 335                     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
 336                        *nr_segs = 1;
 337                        break;
 338                }
 339                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 340                break;
 341        }
 342
 343        if (split) {
 344                /* there isn't chance to merge the splitted bio */
 345                split->bi_opf |= REQ_NOMERGE;
 346
 347                bio_chain(split, *bio);
 348                trace_block_split(split, (*bio)->bi_iter.bi_sector);
 349                submit_bio_noacct(*bio);
 350                *bio = split;
 351
 352                blk_throtl_charge_bio_split(*bio);
 353        }
 354}
 355
 356/**
 357 * blk_queue_split - split a bio and submit the second half
 358 * @bio: [in, out] bio to be split
 359 *
 360 * Split a bio into two bios, chains the two bios, submit the second half and
 361 * store a pointer to the first half in *@bio. Since this function may allocate
 362 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
 363 * that q->bio_split is only released after processing of the split bio has
 364 * finished.
 365 */
 366void blk_queue_split(struct bio **bio)
 367{
 368        unsigned int nr_segs;
 369
 370        __blk_queue_split(bio, &nr_segs);
 371}
 372EXPORT_SYMBOL(blk_queue_split);
 373
 374unsigned int blk_recalc_rq_segments(struct request *rq)
 375{
 376        unsigned int nr_phys_segs = 0;
 377        unsigned int nr_sectors = 0;
 378        struct req_iterator iter;
 379        struct bio_vec bv;
 380
 381        if (!rq->bio)
 382                return 0;
 383
 384        switch (bio_op(rq->bio)) {
 385        case REQ_OP_DISCARD:
 386        case REQ_OP_SECURE_ERASE:
 387                if (queue_max_discard_segments(rq->q) > 1) {
 388                        struct bio *bio = rq->bio;
 389
 390                        for_each_bio(bio)
 391                                nr_phys_segs++;
 392                        return nr_phys_segs;
 393                }
 394                return 1;
 395        case REQ_OP_WRITE_ZEROES:
 396                return 0;
 397        case REQ_OP_WRITE_SAME:
 398                return 1;
 399        }
 400
 401        rq_for_each_bvec(bv, rq, iter)
 402                bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
 403                                UINT_MAX, UINT_MAX);
 404        return nr_phys_segs;
 405}
 406
 407static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 408                struct scatterlist *sglist)
 409{
 410        if (!*sg)
 411                return sglist;
 412
 413        /*
 414         * If the driver previously mapped a shorter list, we could see a
 415         * termination bit prematurely unless it fully inits the sg table
 416         * on each mapping. We KNOW that there must be more entries here
 417         * or the driver would be buggy, so force clear the termination bit
 418         * to avoid doing a full sg_init_table() in drivers for each command.
 419         */
 420        sg_unmark_end(*sg);
 421        return sg_next(*sg);
 422}
 423
 424static unsigned blk_bvec_map_sg(struct request_queue *q,
 425                struct bio_vec *bvec, struct scatterlist *sglist,
 426                struct scatterlist **sg)
 427{
 428        unsigned nbytes = bvec->bv_len;
 429        unsigned nsegs = 0, total = 0;
 430
 431        while (nbytes > 0) {
 432                unsigned offset = bvec->bv_offset + total;
 433                unsigned len = min(get_max_segment_size(q, bvec->bv_page,
 434                                        offset), nbytes);
 435                struct page *page = bvec->bv_page;
 436
 437                /*
 438                 * Unfortunately a fair number of drivers barf on scatterlists
 439                 * that have an offset larger than PAGE_SIZE, despite other
 440                 * subsystems dealing with that invariant just fine.  For now
 441                 * stick to the legacy format where we never present those from
 442                 * the block layer, but the code below should be removed once
 443                 * these offenders (mostly MMC/SD drivers) are fixed.
 444                 */
 445                page += (offset >> PAGE_SHIFT);
 446                offset &= ~PAGE_MASK;
 447
 448                *sg = blk_next_sg(sg, sglist);
 449                sg_set_page(*sg, page, len, offset);
 450
 451                total += len;
 452                nbytes -= len;
 453                nsegs++;
 454        }
 455
 456        return nsegs;
 457}
 458
 459static inline int __blk_bvec_map_sg(struct bio_vec bv,
 460                struct scatterlist *sglist, struct scatterlist **sg)
 461{
 462        *sg = blk_next_sg(sg, sglist);
 463        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
 464        return 1;
 465}
 466
 467/* only try to merge bvecs into one sg if they are from two bios */
 468static inline bool
 469__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
 470                           struct bio_vec *bvprv, struct scatterlist **sg)
 471{
 472
 473        int nbytes = bvec->bv_len;
 474
 475        if (!*sg)
 476                return false;
 477
 478        if ((*sg)->length + nbytes > queue_max_segment_size(q))
 479                return false;
 480
 481        if (!biovec_phys_mergeable(q, bvprv, bvec))
 482                return false;
 483
 484        (*sg)->length += nbytes;
 485
 486        return true;
 487}
 488
 489static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
 490                             struct scatterlist *sglist,
 491                             struct scatterlist **sg)
 492{
 493        struct bio_vec bvec, bvprv = { NULL };
 494        struct bvec_iter iter;
 495        int nsegs = 0;
 496        bool new_bio = false;
 497
 498        for_each_bio(bio) {
 499                bio_for_each_bvec(bvec, bio, iter) {
 500                        /*
 501                         * Only try to merge bvecs from two bios given we
 502                         * have done bio internal merge when adding pages
 503                         * to bio
 504                         */
 505                        if (new_bio &&
 506                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
 507                                goto next_bvec;
 508
 509                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
 510                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
 511                        else
 512                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 513 next_bvec:
 514                        new_bio = false;
 515                }
 516                if (likely(bio->bi_iter.bi_size)) {
 517                        bvprv = bvec;
 518                        new_bio = true;
 519                }
 520        }
 521
 522        return nsegs;
 523}
 524
 525/*
 526 * map a request to scatterlist, return number of sg entries setup. Caller
 527 * must make sure sg can hold rq->nr_phys_segments entries
 528 */
 529int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 530                struct scatterlist *sglist, struct scatterlist **last_sg)
 531{
 532        int nsegs = 0;
 533
 534        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 535                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
 536        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
 537                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
 538        else if (rq->bio)
 539                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 540
 541        if (*last_sg)
 542                sg_mark_end(*last_sg);
 543
 544        /*
 545         * Something must have been wrong if the figured number of
 546         * segment is bigger than number of req's physical segments
 547         */
 548        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
 549
 550        return nsegs;
 551}
 552EXPORT_SYMBOL(__blk_rq_map_sg);
 553
 554static inline unsigned int blk_rq_get_max_segments(struct request *rq)
 555{
 556        if (req_op(rq) == REQ_OP_DISCARD)
 557                return queue_max_discard_segments(rq->q);
 558        return queue_max_segments(rq->q);
 559}
 560
 561static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 562                unsigned int nr_phys_segs)
 563{
 564        if (blk_integrity_merge_bio(req->q, req, bio) == false)
 565                goto no_merge;
 566
 567        /* discard request merge won't add new segment */
 568        if (req_op(req) == REQ_OP_DISCARD)
 569                return 1;
 570
 571        if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
 572                goto no_merge;
 573
 574        /*
 575         * This will form the start of a new hw segment.  Bump both
 576         * counters.
 577         */
 578        req->nr_phys_segments += nr_phys_segs;
 579        return 1;
 580
 581no_merge:
 582        req_set_nomerge(req->q, req);
 583        return 0;
 584}
 585
 586int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 587{
 588        if (req_gap_back_merge(req, bio))
 589                return 0;
 590        if (blk_integrity_rq(req) &&
 591            integrity_req_gap_back_merge(req, bio))
 592                return 0;
 593        if (!bio_crypt_ctx_back_mergeable(req, bio))
 594                return 0;
 595        if (blk_rq_sectors(req) + bio_sectors(bio) >
 596            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
 597                req_set_nomerge(req->q, req);
 598                return 0;
 599        }
 600
 601        return ll_new_hw_segment(req, bio, nr_segs);
 602}
 603
 604static int ll_front_merge_fn(struct request *req, struct bio *bio,
 605                unsigned int nr_segs)
 606{
 607        if (req_gap_front_merge(req, bio))
 608                return 0;
 609        if (blk_integrity_rq(req) &&
 610            integrity_req_gap_front_merge(req, bio))
 611                return 0;
 612        if (!bio_crypt_ctx_front_mergeable(req, bio))
 613                return 0;
 614        if (blk_rq_sectors(req) + bio_sectors(bio) >
 615            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
 616                req_set_nomerge(req->q, req);
 617                return 0;
 618        }
 619
 620        return ll_new_hw_segment(req, bio, nr_segs);
 621}
 622
 623static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
 624                struct request *next)
 625{
 626        unsigned short segments = blk_rq_nr_discard_segments(req);
 627
 628        if (segments >= queue_max_discard_segments(q))
 629                goto no_merge;
 630        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
 631            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 632                goto no_merge;
 633
 634        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
 635        return true;
 636no_merge:
 637        req_set_nomerge(q, req);
 638        return false;
 639}
 640
 641static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 642                                struct request *next)
 643{
 644        int total_phys_segments;
 645
 646        if (req_gap_back_merge(req, next->bio))
 647                return 0;
 648
 649        /*
 650         * Will it become too large?
 651         */
 652        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
 653            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 654                return 0;
 655
 656        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
 657        if (total_phys_segments > blk_rq_get_max_segments(req))
 658                return 0;
 659
 660        if (blk_integrity_merge_rq(q, req, next) == false)
 661                return 0;
 662
 663        if (!bio_crypt_ctx_merge_rq(req, next))
 664                return 0;
 665
 666        /* Merge is OK... */
 667        req->nr_phys_segments = total_phys_segments;
 668        return 1;
 669}
 670
 671/**
 672 * blk_rq_set_mixed_merge - mark a request as mixed merge
 673 * @rq: request to mark as mixed merge
 674 *
 675 * Description:
 676 *     @rq is about to be mixed merged.  Make sure the attributes
 677 *     which can be mixed are set in each bio and mark @rq as mixed
 678 *     merged.
 679 */
 680void blk_rq_set_mixed_merge(struct request *rq)
 681{
 682        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
 683        struct bio *bio;
 684
 685        if (rq->rq_flags & RQF_MIXED_MERGE)
 686                return;
 687
 688        /*
 689         * @rq will no longer represent mixable attributes for all the
 690         * contained bios.  It will just track those of the first one.
 691         * Distributes the attributs to each bio.
 692         */
 693        for (bio = rq->bio; bio; bio = bio->bi_next) {
 694                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
 695                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
 696                bio->bi_opf |= ff;
 697        }
 698        rq->rq_flags |= RQF_MIXED_MERGE;
 699}
 700
 701static void blk_account_io_merge_request(struct request *req)
 702{
 703        if (blk_do_io_stat(req)) {
 704                part_stat_lock();
 705                part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 706                part_stat_unlock();
 707        }
 708}
 709
 710static enum elv_merge blk_try_req_merge(struct request *req,
 711                                        struct request *next)
 712{
 713        if (blk_discard_mergable(req))
 714                return ELEVATOR_DISCARD_MERGE;
 715        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
 716                return ELEVATOR_BACK_MERGE;
 717
 718        return ELEVATOR_NO_MERGE;
 719}
 720
 721/*
 722 * For non-mq, this has to be called with the request spinlock acquired.
 723 * For mq with scheduling, the appropriate queue wide lock should be held.
 724 */
 725static struct request *attempt_merge(struct request_queue *q,
 726                                     struct request *req, struct request *next)
 727{
 728        if (!rq_mergeable(req) || !rq_mergeable(next))
 729                return NULL;
 730
 731        if (req_op(req) != req_op(next))
 732                return NULL;
 733
 734        if (rq_data_dir(req) != rq_data_dir(next)
 735            || req->rq_disk != next->rq_disk)
 736                return NULL;
 737
 738        if (req_op(req) == REQ_OP_WRITE_SAME &&
 739            !blk_write_same_mergeable(req->bio, next->bio))
 740                return NULL;
 741
 742        /*
 743         * Don't allow merge of different write hints, or for a hint with
 744         * non-hint IO.
 745         */
 746        if (req->write_hint != next->write_hint)
 747                return NULL;
 748
 749        if (req->ioprio != next->ioprio)
 750                return NULL;
 751
 752        /*
 753         * If we are allowed to merge, then append bio list
 754         * from next to rq and release next. merge_requests_fn
 755         * will have updated segment counts, update sector
 756         * counts here. Handle DISCARDs separately, as they
 757         * have separate settings.
 758         */
 759
 760        switch (blk_try_req_merge(req, next)) {
 761        case ELEVATOR_DISCARD_MERGE:
 762                if (!req_attempt_discard_merge(q, req, next))
 763                        return NULL;
 764                break;
 765        case ELEVATOR_BACK_MERGE:
 766                if (!ll_merge_requests_fn(q, req, next))
 767                        return NULL;
 768                break;
 769        default:
 770                return NULL;
 771        }
 772
 773        /*
 774         * If failfast settings disagree or any of the two is already
 775         * a mixed merge, mark both as mixed before proceeding.  This
 776         * makes sure that all involved bios have mixable attributes
 777         * set properly.
 778         */
 779        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
 780            (req->cmd_flags & REQ_FAILFAST_MASK) !=
 781            (next->cmd_flags & REQ_FAILFAST_MASK)) {
 782                blk_rq_set_mixed_merge(req);
 783                blk_rq_set_mixed_merge(next);
 784        }
 785
 786        /*
 787         * At this point we have either done a back merge or front merge. We
 788         * need the smaller start_time_ns of the merged requests to be the
 789         * current request for accounting purposes.
 790         */
 791        if (next->start_time_ns < req->start_time_ns)
 792                req->start_time_ns = next->start_time_ns;
 793
 794        req->biotail->bi_next = next->bio;
 795        req->biotail = next->biotail;
 796
 797        req->__data_len += blk_rq_bytes(next);
 798
 799        if (!blk_discard_mergable(req))
 800                elv_merge_requests(q, req, next);
 801
 802        /*
 803         * 'next' is going away, so update stats accordingly
 804         */
 805        blk_account_io_merge_request(next);
 806
 807        trace_block_rq_merge(next);
 808
 809        /*
 810         * ownership of bio passed from next to req, return 'next' for
 811         * the caller to free
 812         */
 813        next->bio = NULL;
 814        return next;
 815}
 816
 817static struct request *attempt_back_merge(struct request_queue *q,
 818                struct request *rq)
 819{
 820        struct request *next = elv_latter_request(q, rq);
 821
 822        if (next)
 823                return attempt_merge(q, rq, next);
 824
 825        return NULL;
 826}
 827
 828static struct request *attempt_front_merge(struct request_queue *q,
 829                struct request *rq)
 830{
 831        struct request *prev = elv_former_request(q, rq);
 832
 833        if (prev)
 834                return attempt_merge(q, prev, rq);
 835
 836        return NULL;
 837}
 838
 839/*
 840 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 841 * otherwise. The caller is responsible for freeing 'next' if the merge
 842 * happened.
 843 */
 844bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 845                           struct request *next)
 846{
 847        return attempt_merge(q, rq, next);
 848}
 849
 850bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 851{
 852        if (!rq_mergeable(rq) || !bio_mergeable(bio))
 853                return false;
 854
 855        if (req_op(rq) != bio_op(bio))
 856                return false;
 857
 858        /* different data direction or already started, don't merge */
 859        if (bio_data_dir(bio) != rq_data_dir(rq))
 860                return false;
 861
 862        /* must be same device */
 863        if (rq->rq_disk != bio->bi_bdev->bd_disk)
 864                return false;
 865
 866        /* only merge integrity protected bio into ditto rq */
 867        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
 868                return false;
 869
 870        /* Only merge if the crypt contexts are compatible */
 871        if (!bio_crypt_rq_ctx_compatible(rq, bio))
 872                return false;
 873
 874        /* must be using the same buffer */
 875        if (req_op(rq) == REQ_OP_WRITE_SAME &&
 876            !blk_write_same_mergeable(rq->bio, bio))
 877                return false;
 878
 879        /*
 880         * Don't allow merge of different write hints, or for a hint with
 881         * non-hint IO.
 882         */
 883        if (rq->write_hint != bio->bi_write_hint)
 884                return false;
 885
 886        if (rq->ioprio != bio_prio(bio))
 887                return false;
 888
 889        return true;
 890}
 891
 892enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 893{
 894        if (blk_discard_mergable(rq))
 895                return ELEVATOR_DISCARD_MERGE;
 896        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 897                return ELEVATOR_BACK_MERGE;
 898        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 899                return ELEVATOR_FRONT_MERGE;
 900        return ELEVATOR_NO_MERGE;
 901}
 902
 903static void blk_account_io_merge_bio(struct request *req)
 904{
 905        if (!blk_do_io_stat(req))
 906                return;
 907
 908        part_stat_lock();
 909        part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
 910        part_stat_unlock();
 911}
 912
 913enum bio_merge_status {
 914        BIO_MERGE_OK,
 915        BIO_MERGE_NONE,
 916        BIO_MERGE_FAILED,
 917};
 918
 919static enum bio_merge_status bio_attempt_back_merge(struct request *req,
 920                struct bio *bio, unsigned int nr_segs)
 921{
 922        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 923
 924        if (!ll_back_merge_fn(req, bio, nr_segs))
 925                return BIO_MERGE_FAILED;
 926
 927        trace_block_bio_backmerge(bio);
 928        rq_qos_merge(req->q, req, bio);
 929
 930        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 931                blk_rq_set_mixed_merge(req);
 932
 933        req->biotail->bi_next = bio;
 934        req->biotail = bio;
 935        req->__data_len += bio->bi_iter.bi_size;
 936
 937        bio_crypt_free_ctx(bio);
 938
 939        blk_account_io_merge_bio(req);
 940        return BIO_MERGE_OK;
 941}
 942
 943static enum bio_merge_status bio_attempt_front_merge(struct request *req,
 944                struct bio *bio, unsigned int nr_segs)
 945{
 946        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
 947
 948        if (!ll_front_merge_fn(req, bio, nr_segs))
 949                return BIO_MERGE_FAILED;
 950
 951        trace_block_bio_frontmerge(bio);
 952        rq_qos_merge(req->q, req, bio);
 953
 954        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
 955                blk_rq_set_mixed_merge(req);
 956
 957        bio->bi_next = req->bio;
 958        req->bio = bio;
 959
 960        req->__sector = bio->bi_iter.bi_sector;
 961        req->__data_len += bio->bi_iter.bi_size;
 962
 963        bio_crypt_do_front_merge(req, bio);
 964
 965        blk_account_io_merge_bio(req);
 966        return BIO_MERGE_OK;
 967}
 968
 969static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
 970                struct request *req, struct bio *bio)
 971{
 972        unsigned short segments = blk_rq_nr_discard_segments(req);
 973
 974        if (segments >= queue_max_discard_segments(q))
 975                goto no_merge;
 976        if (blk_rq_sectors(req) + bio_sectors(bio) >
 977            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 978                goto no_merge;
 979
 980        rq_qos_merge(q, req, bio);
 981
 982        req->biotail->bi_next = bio;
 983        req->biotail = bio;
 984        req->__data_len += bio->bi_iter.bi_size;
 985        req->nr_phys_segments = segments + 1;
 986
 987        blk_account_io_merge_bio(req);
 988        return BIO_MERGE_OK;
 989no_merge:
 990        req_set_nomerge(q, req);
 991        return BIO_MERGE_FAILED;
 992}
 993
 994static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
 995                                                   struct request *rq,
 996                                                   struct bio *bio,
 997                                                   unsigned int nr_segs,
 998                                                   bool sched_allow_merge)
 999{
1000        if (!blk_rq_merge_ok(rq, bio))
1001                return BIO_MERGE_NONE;
1002
1003        switch (blk_try_merge(rq, bio)) {
1004        case ELEVATOR_BACK_MERGE:
1005                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1006                        return bio_attempt_back_merge(rq, bio, nr_segs);
1007                break;
1008        case ELEVATOR_FRONT_MERGE:
1009                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1010                        return bio_attempt_front_merge(rq, bio, nr_segs);
1011                break;
1012        case ELEVATOR_DISCARD_MERGE:
1013                return bio_attempt_discard_merge(q, rq, bio);
1014        default:
1015                return BIO_MERGE_NONE;
1016        }
1017
1018        return BIO_MERGE_FAILED;
1019}
1020
1021/**
1022 * blk_attempt_plug_merge - try to merge with %current's plugged list
1023 * @q: request_queue new bio is being queued at
1024 * @bio: new bio being queued
1025 * @nr_segs: number of segments in @bio
1026 * @same_queue_rq: pointer to &struct request that gets filled in when
1027 * another request associated with @q is found on the plug list
1028 * (optional, may be %NULL)
1029 *
1030 * Determine whether @bio being queued on @q can be merged with a request
1031 * on %current's plugged list.  Returns %true if merge was successful,
1032 * otherwise %false.
1033 *
1034 * Plugging coalesces IOs from the same issuer for the same purpose without
1035 * going through @q->queue_lock.  As such it's more of an issuing mechanism
1036 * than scheduling, and the request, while may have elvpriv data, is not
1037 * added on the elevator at this point.  In addition, we don't have
1038 * reliable access to the elevator outside queue lock.  Only check basic
1039 * merging parameters without querying the elevator.
1040 *
1041 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1042 */
1043bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1044                unsigned int nr_segs, struct request **same_queue_rq)
1045{
1046        struct blk_plug *plug;
1047        struct request *rq;
1048        struct list_head *plug_list;
1049
1050        plug = blk_mq_plug(q, bio);
1051        if (!plug)
1052                return false;
1053
1054        plug_list = &plug->mq_list;
1055
1056        list_for_each_entry_reverse(rq, plug_list, queuelist) {
1057                if (rq->q == q && same_queue_rq) {
1058                        /*
1059                         * Only blk-mq multiple hardware queues case checks the
1060                         * rq in the same queue, there should be only one such
1061                         * rq in a queue
1062                         **/
1063                        *same_queue_rq = rq;
1064                }
1065
1066                if (rq->q != q)
1067                        continue;
1068
1069                if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1070                    BIO_MERGE_OK)
1071                        return true;
1072        }
1073
1074        return false;
1075}
1076
1077/*
1078 * Iterate list of requests and see if we can merge this bio with any
1079 * of them.
1080 */
1081bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1082                        struct bio *bio, unsigned int nr_segs)
1083{
1084        struct request *rq;
1085        int checked = 8;
1086
1087        list_for_each_entry_reverse(rq, list, queuelist) {
1088                if (!checked--)
1089                        break;
1090
1091                switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1092                case BIO_MERGE_NONE:
1093                        continue;
1094                case BIO_MERGE_OK:
1095                        return true;
1096                case BIO_MERGE_FAILED:
1097                        return false;
1098                }
1099
1100        }
1101
1102        return false;
1103}
1104EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1105
1106bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1107                unsigned int nr_segs, struct request **merged_request)
1108{
1109        struct request *rq;
1110
1111        switch (elv_merge(q, &rq, bio)) {
1112        case ELEVATOR_BACK_MERGE:
1113                if (!blk_mq_sched_allow_merge(q, rq, bio))
1114                        return false;
1115                if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1116                        return false;
1117                *merged_request = attempt_back_merge(q, rq);
1118                if (!*merged_request)
1119                        elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1120                return true;
1121        case ELEVATOR_FRONT_MERGE:
1122                if (!blk_mq_sched_allow_merge(q, rq, bio))
1123                        return false;
1124                if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1125                        return false;
1126                *merged_request = attempt_front_merge(q, rq);
1127                if (!*merged_request)
1128                        elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1129                return true;
1130        case ELEVATOR_DISCARD_MERGE:
1131                return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1132        default:
1133                return false;
1134        }
1135}
1136EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1137