linux/block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the first bio starts with a non-zero offset;
         * otherwise it is quite difficult to respect the sg gap limit.  We
         * work hard to merge a huge number of small individual bios, as in
         * the mkfs case.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the merged segment ending at an
         * unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must begin with the
         *   single bvec 'nb'; otherwise 'nb' can't merge with 'pb'
         */
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (biovec_phys_mergeable(q, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
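
/*
 * Worked example (hypothetical numbers): with a virt boundary mask of 0xfff
 * (4 KiB), suppose 'prev' ends with a bvec whose last byte falls at in-page
 * offset 0x5ff and 'next' starts with a bvec at offset 0x600.  If the two
 * bvecs are physically contiguous, biovec_phys_mergeable() accepts the pair
 * and no gap exists.  Otherwise the hardware would see a hole in the middle
 * of a segment, since neither 0x600 nor the unaligned end of 'prev' sits on
 * a 4 KiB boundary, so bio_will_gap() reports a gap.
 */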

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
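
/*
 * Worked example (hypothetical numbers): with a 1 MiB discard granularity
 * (granularity = 2048 sectors), discard_alignment = 0 and
 * max_discard_sectors = 8192, a 10000-sector discard starting at sector
 * 1024 is first capped to 8192 sectors.  The remainder would then start at
 * sector 1024 + 8192 = 9216, which is not a multiple of 2048; sector_div()
 * yields 9216 % 2048 = 1024, so the split shrinks to 8192 - 1024 = 7168
 * sectors and the remainder starts at the aligned sector 8192.
 */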

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 0;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
        unsigned max_sectors = sectors;
        unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
        unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
        unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

        max_sectors += start_offset;
        max_sectors &= ~(pbs - 1);
        if (max_sectors > start_offset)
                return max_sectors - start_offset;

        return sectors & ~(lbs - 1);
}
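
/*
 * Worked example (hypothetical numbers): assume a 4 KiB physical block size
 * (pbs = 8 sectors), a 512-byte logical block size (lbs = 1 sector), a size
 * limit of 20 sectors and a bio starting at sector 3 (start_offset = 3).
 * Then max_sectors = 20 + 3 = 23, which rounds down to 16, so 16 - 3 = 13
 * sectors are returned and the request ends at sector 16, a physical block
 * boundary.  If the limit were too small to reach the next physical block
 * boundary, the fallback would round down to the logical block size only.
 */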

static inline unsigned get_max_segment_size(const struct request_queue *q,
                                            struct page *start_page,
                                            unsigned long offset)
{
        unsigned long mask = queue_segment_boundary(q);

        offset = mask & (page_to_phys(start_page) + offset);

        /*
         * 'mask - offset + 1' may overflow to zero, e.g. for a zero page
         * physical address on a 32-bit arch; min_not_zero() then falls
         * back to the queue's max segment size.
         */
        return min_not_zero(mask - offset + 1,
                        (unsigned long)queue_max_segment_size(q));
}
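
/*
 * Worked example (hypothetical numbers): with a segment boundary mask of
 * 0xffff (64 KiB) and a page whose physical address plus offset equals
 * 0x1f000, 'offset' becomes 0xf000 and the largest segment that does not
 * cross the next 64 KiB boundary is 0xffff - 0xf000 + 1 = 0x1000 bytes,
 * further capped by queue_max_segment_size().
 */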

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
                            const struct bio_vec *bv, unsigned *nsegs,
                            unsigned *sectors, unsigned max_segs,
                            unsigned max_sectors)
{
        unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
        unsigned len = min(bv->bv_len, max_len);
        unsigned total_len = 0;
        unsigned seg_size = 0;

        while (len && *nsegs < max_segs) {
                seg_size = get_max_segment_size(q, bv->bv_page,
                                                bv->bv_offset + total_len);
                seg_size = min(seg_size, len);

                (*nsegs)++;
                total_len += seg_size;
                len -= seg_size;

                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
                        break;
        }

        *sectors += total_len >> 9;

        /* tell the caller to split the bvec if it is too big to fit */
        return len > 0 || bv->bv_len > max_len;
}
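
/*
 * Worked example (hypothetical numbers): a 256 KiB bvec on a queue with a
 * 64 KiB max segment size is consumed in 64 KiB chunks, bumping *nsegs once
 * per chunk.  If only two segments remain below @max_segs, the loop stops
 * after 128 KiB with len > 0, the function returns true and the caller
 * splits the bio in the middle of the bvec.
 */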

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned nsegs = 0, sectors = 0;
        const unsigned max_sectors = get_max_io_size(q, bio);
        const unsigned max_segs = queue_max_segments(q);

        bio_for_each_bvec(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (nsegs < max_segs &&
                    sectors + (bv.bv_len >> 9) <= max_sectors &&
                    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;
                        sectors += bv.bv_len >> 9;
                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
                                           max_sectors)) {
                        goto split;
                }

                bvprv = bv;
                bvprvp = &bvprv;
        }

        *segs = nsegs;
        return NULL;
split:
        *segs = nsegs;

        /*
         * Bio splitting may cause subtle trouble such as hangs when doing
         * sync iopoll in the direct I/O path.  Since the performance gain
         * of iopoll for big I/O can be trivial, disable iopoll when a split
         * is needed.
         */
        bio->bi_opf &= ~REQ_HIPRI;

        return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from q->bio_split, it is the responsibility
 * of the caller to ensure that q->bio_split is only released after processing
 * of the split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
        struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
        struct bio *split = NULL;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        default:
                /*
                 * All drivers must accept single-segment bios that are <=
                 * PAGE_SIZE.  This is a quick and dirty check that relies on
                 * the fact that bi_io_vec[0] is always valid if a bio has data.
                 * The check might lead to occasional false negatives when bios
                 * are cloned, but compared to the performance impact of cloned
                 * bios themselves the loop below doesn't matter anyway.
                 */
                if (!q->limits.chunk_sectors &&
                    (*bio)->bi_vcnt == 1 &&
                    ((*bio)->bi_io_vec[0].bv_len +
                     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
                        *nr_segs = 1;
                        break;
                }
                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                bio_chain(split, *bio);
                trace_block_split(split, (*bio)->bi_iter.bi_sector);
                submit_bio_noacct(*bio);
                *bio = split;
        }
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
 * that q->bio_split is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct bio **bio)
{
        unsigned int nr_segs;

        __blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);
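
/*
 * Typical usage (a minimal sketch, not taken from this file; 'my_submit_bio'
 * is a hypothetical bio-based driver entry point): call blk_queue_split() at
 * the top of the ->submit_bio handler, so the rest of the handler only ever
 * sees bios that fit the queue limits; the remainder has already been
 * resubmitted via submit_bio_noacct() by __blk_queue_split().
 *
 *	static blk_qc_t my_submit_bio(struct bio *bio)
 *	{
 *		blk_queue_split(&bio);
 *		...
 *		return BLK_QC_T_NONE;
 *	}
 */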

unsigned int blk_recalc_rq_segments(struct request *rq)
{
        unsigned int nr_phys_segs = 0;
        unsigned int nr_sectors = 0;
        struct req_iterator iter;
        struct bio_vec bv;

        if (!rq->bio)
                return 0;

        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                if (queue_max_discard_segments(rq->q) > 1) {
                        struct bio *bio = rq->bio;

                        for_each_bio(bio)
                                nr_phys_segs++;
                        return nr_phys_segs;
                }
                return 1;
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        rq_for_each_bvec(bv, rq, iter)
                bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
                                UINT_MAX, UINT_MAX);
        return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
{
        if (!*sg)
                return sglist;

        /*
         * If the driver previously mapped a shorter list, we could see a
         * termination bit prematurely unless it fully inits the sg table
         * on each mapping. We KNOW that there must be more entries here
         * or the driver would be buggy, so force clear the termination bit
         * to avoid doing a full sg_init_table() in drivers for each command.
         */
        sg_unmark_end(*sg);
        return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
{
        unsigned nbytes = bvec->bv_len;
        unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
                unsigned len = min(get_max_segment_size(q, bvec->bv_page,
                                        offset), nbytes);
                struct page *page = bvec->bv_page;

                /*
                 * Unfortunately a fair number of drivers barf on scatterlists
                 * that have an offset larger than PAGE_SIZE, despite other
                 * subsystems dealing with that invariant just fine.  For now
                 * stick to the legacy format where we never present those from
                 * the block layer, but the code below should be removed once
                 * these offenders (mostly MMC/SD drivers) are fixed.
                 */
                page += (offset >> PAGE_SHIFT);
                offset &= ~PAGE_MASK;

                *sg = blk_next_sg(sg, sglist);
                sg_set_page(*sg, page, len, offset);

                total += len;
                nbytes -= len;
                nsegs++;
        }

        return nsegs;
}
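
/*
 * Worked example (hypothetical numbers): a 12 KiB multi-page bvec with
 * bv_offset = 1024 on a queue with an 8 KiB max segment size is emitted as
 * two scatterlist entries.  The first covers 8 KiB starting 1024 bytes into
 * the first page; the second covers the remaining 4 KiB, with the page
 * pointer advanced and the offset normalized so that it stays below
 * PAGE_SIZE as described above.
 */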

static inline int __blk_bvec_map_sg(struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = blk_next_sg(sg, sglist);
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
                           struct bio_vec *bvprv, struct scatterlist **sg)
{
        int nbytes = bvec->bv_len;

        if (!*sg)
                return false;

        if ((*sg)->length + nbytes > queue_max_segment_size(q))
                return false;

        if (!biovec_phys_mergeable(q, bvprv, bvec))
                return false;

        (*sg)->length += nbytes;

        return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
        bool new_bio = false;

        for_each_bio(bio) {
                bio_for_each_bvec(bvec, bio, iter) {
                        /*
                         * Only try to merge bvecs from two bios, since
                         * bvecs within one bio have already been merged
                         * internally when pages were added to it.
                         */
                        if (new_bio &&
                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
                                goto next_bvec;

                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
                        else
                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
                        new_bio = false;
                }
                if (likely(bio->bi_iter.bi_size)) {
                        bvprv = bvec;
                        new_bio = true;
                }
        }

        return nsegs;
}

/*
 * Map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
                struct scatterlist *sglist, struct scatterlist **last_sg)
{
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

        if (*last_sg)
                sg_mark_end(*last_sg);

        /*
         * Something must be wrong if the computed number of segments is
         * bigger than the number of the request's physical segments.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
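
/*
 * Typical usage (a minimal sketch, not taken from this file; 'my_dev' and
 * its 'sgl' table are hypothetical): a blk-mq driver maps a request inside
 * its ->queue_rq handler before programming its DMA engine.
 *
 *	struct scatterlist *last_sg = NULL;
 *	int nsegs;
 *
 *	sg_init_table(my_dev->sgl, blk_rq_nr_phys_segments(rq));
 *	nsegs = __blk_rq_map_sg(rq->q, rq, my_dev->sgl, &last_sg);
 *
 * On return, the first 'nsegs' entries of my_dev->sgl are valid and the
 * last one has been terminated with sg_mark_end().
 */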

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
        if (req_op(rq) == REQ_OP_DISCARD)
                return queue_max_discard_segments(rq->q);
        return queue_max_segments(rq->q);
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                unsigned int nr_phys_segs)
{
        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /* a discard request merge won't add a new segment */
        if (req_op(req) == REQ_OP_DISCARD)
                return 1;

        if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(req->q, req);
        return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_back_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
                unsigned int nr_segs)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (!bio_crypt_ctx_front_mergeable(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (total_phys_segments > blk_rq_get_max_segments(req))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        if (!bio_crypt_ctx_merge_rq(req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
        if (blk_do_io_stat(req)) {
                part_stat_lock();
                part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
                part_stat_unlock();
        }
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together.  The ranges need not be
 * contiguous.
 * Otherwise, the bios/requests are handled the same as others, which
 * must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue-wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append the bio list from 'next'
         * to 'req' and release 'next'.  The merge_requests_fn will have
         * updated the segment counts; update the sector counts here.
         * Handle DISCARDs separately, as they have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge_request(next);

        trace_block_rq_merge(next);

        /*
         * Ownership of the bios has passed from 'next' to 'req'; return
         * 'next' for the caller to free.
         */
        next->bio = NULL;
        return next;
}
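
/*
 * Worked example (hypothetical requests): if 'req' covers sectors 0-7 with
 * bios b1->b2 and 'next' covers sectors 8-15 with bio b3,
 * blk_try_req_merge() reports a back merge, b2->bi_next is pointed at b3,
 * req->biotail becomes b3 and req->__data_len grows by 4 KiB.  'next' is
 * returned with next->bio == NULL for the caller to free.
 */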

static struct request *attempt_back_merge(struct request_queue *q,
                struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
                struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                           struct request *next)
{
        return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device */
        if (rq->rq_disk != bio->bi_bdev->bd_disk)
                return false;

        /* only merge an integrity-protected bio into a likewise protected rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* Only merge if the crypt contexts are compatible */
        if (!bio_crypt_rq_ctx_compatible(rq, bio))
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        if (rq->ioprio != bio_prio(bio))
                return false;

        return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}
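
/*
 * Worked example (hypothetical numbers): for a request at sectors 100-107
 * (blk_rq_pos() == 100, blk_rq_sectors() == 8), a bio starting at sector
 * 108 qualifies as a back merge (100 + 8 == 108), while an 8-sector bio
 * starting at sector 92 qualifies as a front merge (100 - 8 == 92).
 * Anything else is ELEVATOR_NO_MERGE.
 */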

static void blk_account_io_merge_bio(struct request *req)
{
        if (!blk_do_io_stat(req))
                return;

        part_stat_lock();
        part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
        part_stat_unlock();
}

enum bio_merge_status {
        BIO_MERGE_OK,
        BIO_MERGE_NONE,
        BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
{
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

        if (!ll_back_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;

        trace_block_bio_backmerge(bio);
        rq_qos_merge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;

        bio_crypt_free_ctx(bio);

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
                struct bio *bio, unsigned int nr_segs)
{
        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

        if (!ll_front_merge_fn(req, bio, nr_segs))
                return BIO_MERGE_FAILED;

        trace_block_bio_frontmerge(bio);
        rq_qos_merge(req->q, req, bio);

        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);

        bio->bi_next = req->bio;
        req->bio = bio;

        req->__sector = bio->bi_iter.bi_sector;
        req->__data_len += bio->bi_iter.bi_size;

        bio_crypt_do_front_merge(req, bio);

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
                struct request *req, struct bio *bio)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        rq_qos_merge(q, req, bio);

        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->__data_len += bio->bi_iter.bi_size;
        req->nr_phys_segments = segments + 1;

        blk_account_io_merge_bio(req);
        return BIO_MERGE_OK;
no_merge:
        req_set_nomerge(q, req);
        return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
                                                   struct request *rq,
                                                   struct bio *bio,
                                                   unsigned int nr_segs,
                                                   bool sched_allow_merge)
{
        if (!blk_rq_merge_ok(rq, bio))
                return BIO_MERGE_NONE;

        switch (blk_try_merge(rq, bio)) {
        case ELEVATOR_BACK_MERGE:
                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
                        return bio_attempt_back_merge(rq, bio, nr_segs);
                break;
        case ELEVATOR_FRONT_MERGE:
                if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
                        return bio_attempt_front_merge(rq, bio, nr_segs);
                break;
        case ELEVATOR_DISCARD_MERGE:
                return bio_attempt_discard_merge(q, rq, bio);
        default:
                return BIO_MERGE_NONE;
        }

        return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **same_queue_rq)
{
        struct blk_plug *plug;
        struct request *rq;
        struct list_head *plug_list;

        plug = blk_mq_plug(q, bio);
        if (!plug)
                return false;

        plug_list = &plug->mq_list;

        list_for_each_entry_reverse(rq, plug_list, queuelist) {
                if (rq->q == q && same_queue_rq) {
                        /*
                         * Only the blk-mq multiple hardware queues case
                         * checks the rq in the same queue; there should
                         * be only one such rq in a queue.
                         */
                        *same_queue_rq = rq;
                }

                if (rq->q != q)
                        continue;

                if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
                    BIO_MERGE_OK)
                        return true;
        }

        return false;
}

/*
 * Iterate the list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
                        struct bio *bio, unsigned int nr_segs)
{
        struct request *rq;
        int checked = 8;

        list_for_each_entry_reverse(rq, list, queuelist) {
                if (!checked--)
                        break;

                switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
                case BIO_MERGE_NONE:
                        continue;
                case BIO_MERGE_OK:
                        return true;
                case BIO_MERGE_FAILED:
                        return false;
                }
        }

        return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **merged_request)
{
        struct request *rq;

        switch (elv_merge(q, &rq, bio)) {
        case ELEVATOR_BACK_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
                        return false;
                *merged_request = attempt_back_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
                return true;
        case ELEVATOR_FRONT_MERGE:
                if (!blk_mq_sched_allow_merge(q, rq, bio))
                        return false;
                if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
                        return false;
                *merged_request = attempt_front_merge(q, rq);
                if (!*merged_request)
                        elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
                return true;
        case ELEVATOR_DISCARD_MERGE:
                return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);