linux/block/blk-flush.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011           Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011           Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. A flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     a PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
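
/*
 * Worked example of the rules above: a write carrying REQ_PREFLUSH |
 * REQ_FUA with data, sent to a device that advertises a writeback cache
 * but no FUA, is assigned all three steps and walks through
 * PREFLUSH -> DATA -> POSTFLUSH before it is ended.  On a device with
 * both a writeback cache and FUA the same request only needs
 * PREFLUSH -> DATA, with REQ_FUA left set on the data write; on a device
 * without a writeback cache it is executed as a plain write.
 */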

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, unsigned int flags);

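/*
 * Map a request's REQ_PREFLUSH/REQ_FUA flags and the queue's writeback
 * cache and FUA capabilities (QUEUE_FLAG_WC/QUEUE_FLAG_FUA in @fflags) to
 * the set of REQ_FSEQ_* steps the request has to execute.  For example, a
 * data-less REQ_PREFLUSH on a writeback-cache device maps to
 * REQ_FSEQ_PREFLUSH alone, while a REQ_FUA write to a writeback-cache
 * device without FUA support maps to REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH.
 */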
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

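/*
 * Return the next incomplete step of @rq's flush sequence.  rq->flush.seq
 * accumulates the REQ_FSEQ_* bits that have already completed (or were
 * never needed), so the lowest clear bit is the step to run next; once all
 * three action bits are set this returns REQ_FSEQ_DONE.
 */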
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
        blk_mq_add_to_requeue_list(rq, add_front, true);
}

static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->rq_disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      ktime_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed the @seq part of its flush sequence.  Record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        unsigned int cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

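/*
 * Completion handler for the flush request itself.  Once the reference
 * shared with the timeout path is dropped, return the borrowed tag, flip
 * flush_running_idx and advance every request that was waiting on this
 * flush to the next step of its sequence via blk_flush_complete_seq().
 */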
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        /* release the tag's ownership back to the request it was cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!refcount_dec_and_test(&flush_rq->ref)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return;
        }

        blk_account_io_flush(flush_rq);
        /*
         * The flush request has to be marked as IDLE when it is really
         * ended because its .end_io() is called from the timeout code path
         * too, to avoid a use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK)
                error = fq->rq_status;

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}

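/*
 * The per-hctx flush_rq is the only request whose ->end_io is
 * flush_end_io(), so this check lets the rest of blk-mq recognize a flush
 * request and treat its lifetime specially (see the reference handling in
 * flush_end_io() above).
 */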
bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing a flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush-related states of @q have changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 *
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           unsigned int flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (!list_empty(&fq->flush_data_in_flight) &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * With no I/O scheduler, borrow the tag from the first request,
         * since the two can't be in flight at the same time, and take over
         * ownership of that tag for the flush request.
         *
         * With an I/O scheduler, the flush request only needs to borrow the
         * scheduler tag so that putting/getting the driver tag works as
         * usual.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator) {
                flush_rq->tag = first_rq->tag;

                /*
                 * We borrow the data request's driver tag, so we have to
                 * mark this flush request as INFLIGHT to avoid double
                 * accounting of that driver tag.
                 */
                flush_rq->rq_flags |= RQF_MQ_INFLIGHT;
        } else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
        /*
         * Order the WRITE to ->end_io against the WRITE to rq->ref; the
         * pairing barrier is the one implied by refcount_inc_not_zero() in
         * blk_mq_find_and_get_req(), which orders the WRITE/READ of
         * flush_rq->ref against the READ of flush_rq->end_io.
         */
        smp_wmb();
        refcount_set(&flush_rq->ref, 1);

        blk_flush_queue_rq(flush_rq, false);
}

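/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA
 * request, installed by blk_insert_flush() below.  Marking REQ_FSEQ_DATA
 * complete may queue the request for a POSTFLUSH or end it outright; the
 * hardware queue is then restarted so nothing queued meanwhile is stalled.
 */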
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions or from __blk_mq_run_hw_queue() to dispatch the request.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                blk_mq_request_bypass_insert(rq, false, false);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

        rq->end_io = mq_flush_data_end_io;

        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, NULL, 0);
        bio_set_dev(&bio, bdev);
        bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
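
/*
 * Typical in-kernel usage is simply the following (a minimal sketch, where
 * handle_error() stands in for whatever the caller does on failure and is
 * not an API provided here):
 *
 *      int err = blkdev_issue_flush(bdev);
 *
 *      if (err)
 *              handle_error(err);      // cache flush failed; previously
 *                                      // completed plain writes may not be
 *                                      // on stable media
 */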

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio-based request queues have no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to
 * avoid a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  Such drivers need to assign a different
 * lock class to their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill and, worse, it introduces a horrible boot
 * delay because synchronize_rcu() is implied in lockdep_unregister_key(),
 * which is called for each hctx release.  SCSI probing may synchronously
 * create and destroy lots of MQ request_queues for non-existent devices,
 * and some robot test kernels always enable the lockdep option.  More than
 * half an hour has been observed during SCSI MQ probing with a per-fq lock
 * class.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);