linux/block/blk-flush.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011           Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011           Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make any
 * difference.  The requests are either completed immediately if there's no data
 * or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests that were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
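
/*
 * Illustrative example (editorial sketch, not part of the driver logic):
 * for a write request carrying REQ_PREFLUSH | REQ_FUA plus data,
 * blk_flush_policy() below yields roughly the following sequences,
 * depending on what the device advertises:
 *
 *   no writeback cache            -> DATA only (the flush flags are dropped)
 *   writeback cache + FUA         -> PREFLUSH, DATA (REQ_FUA kept on DATA)
 *   writeback cache, no FUA       -> PREFLUSH, DATA, POSTFLUSH
 *
 * A flush request without data on a writeback-cache device becomes a bare
 * PREFLUSH step and completes once that flush finishes.
 */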

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, unsigned int flags);

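/*
 * Decide which steps of the flush sequence a request needs.  The returned
 * mask is built from REQ_FSEQ_* bits based on whether the request carries
 * data and on the queue's write-back cache / FUA capability flags.
 */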
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}

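/*
 * Return the next step the request has yet to complete: the lowest
 * REQ_FSEQ_* bit that is still clear in rq->flush.seq.
 */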
static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

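/*
 * Queue @rq on the requeue list and kick it for dispatch; @add_front
 * selects whether it goes to the head or the tail of that list.
 */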
static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
        blk_mq_add_to_requeue_list(rq, add_front, true);
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: status of the part of the sequence that just completed
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        unsigned int cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->flush.list, pending);
                break;

        case REQ_FSEQ_DATA:
                list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
                blk_flush_queue_rq(rq, true);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                BUG_ON(!list_empty(&rq->queuelist));
                list_del_init(&rq->flush.list);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}

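/*
 * Completion handler for the flush request itself.  Releases the borrowed
 * tag, flips the running index of the double-buffered flush queue and
 * advances every request that was waiting on this flush to its next
 * sequence step.
 */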
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
        struct blk_mq_hw_ctx *hctx;

        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!refcount_dec_and_test(&flush_rq->ref)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return;
        }

        if (fq->rq_status != BLK_STS_OK)
                error = fq->rq_status;

        hctx = flush_rq->mq_hctx;
        if (!q->elevator) {
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = -1;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, flush.list) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        fq->flush_queue_delayed = 0;
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
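
/*
 * Editorial sketch of the double buffering described at the top of the
 * file: with flush_pending_idx == flush_running_idx == 0, new PREFLUSH/
 * POSTFLUSH waiters collect on flush_queue[0].  blk_kick_flush() then
 * issues the flush and toggles pending_idx to 1, so later waiters collect
 * on flush_queue[1] while flush_end_io() drains flush_queue[0] and toggles
 * running_idx to match.  Only one flush is ever in flight (C1).
 */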

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           unsigned int flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, flush.list);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3
         *
         * For blk-mq + scheduling, we can risk having all driver tags
         * assigned to empty flushes, and we deadlock if we are expecting
         * other requests to make progress. Don't defer for that case.
         */
        if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * With no (none) scheduler, borrow the tag from the first request
         * since the two can't be in flight at the same time, and take over
         * the tag's ownership for the flush request.
         *
         * With an IO scheduler, the flush request only needs to borrow the
         * scheduler tag so that the driver-tag get/put accounting stays
         * consistent.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator) {
                fq->orig_rq = first_rq;
                flush_rq->tag = first_rq->tag;
                blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
        } else {
                flush_rq->internal_tag = first_rq->internal_tag;
        }

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;

        blk_flush_queue_rq(flush_rq, false);
}

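/*
 * Completion handler for the DATA step of a sequenced PREFLUSH/FUA
 * request.  Records the step as done (possibly queueing a POSTFLUSH) and
 * restarts the hardware queue so a flush issued while the queue was
 * otherwise empty doesn't stall.
 */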
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
}

/**
 * blk_insert_flush - insert a new PREFLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from the request insertion path while @rq is being
 * submitted.  Analyze what needs to be done and put it on the right queue.
 */
void blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        /*
         * An empty flush handed down from a stacking driver may
         * translate into nothing if the underlying device does not
         * advertise a write-back cache.  In this case, simply
         * complete the request.
         */
        if (!policy) {
                blk_mq_end_request(rq, 0);
                return;
        }

        BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

        /*
         * If there's data but flush is not necessary, the request can be
         * processed directly without going through flush machinery.  Queue
         * for normal execution.
         */
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                blk_mq_request_bypass_insert(rq, false);
                return;
        }

        /*
         * @rq should go through flush machinery.  Mark it part of flush
         * sequence and submit for further processing.
         */
        memset(&rq->flush, 0, sizeof(rq->flush));
        INIT_LIST_HEAD(&rq->flush.list);
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */

        rq->end_io = mq_flush_data_end_io;

        spin_lock_irq(&fq->mq_flush_lock);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
        spin_unlock_irq(&fq->mq_flush_lock);
}
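
/*
 * Worked example (editorial, illustrative only): a REQ_OP_WRITE request
 * carrying REQ_PREFLUSH | REQ_FUA and data, on a queue with QUEUE_FLAG_WC
 * set but not QUEUE_FLAG_FUA, gets policy PREFLUSH | DATA | POSTFLUSH.
 * The final blk_flush_complete_seq() call above seeds rq->flush.seq with
 * the steps that are *not* needed, so blk_flush_cur_seq() first returns
 * PREFLUSH: the request waits for a flush, then its data is dispatched,
 * then it waits for a second flush, and only after that is the original
 * bio completed.
 */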

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @error_sector:       (optional) location to store the error sector, if any
 *
 * Description:
 *    Issue a flush for the block device in question. Caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                sector_t *error_sector)
{
        struct request_queue *q;
        struct bio *bio;
        int ret = 0;

        if (bdev->bd_disk == NULL)
                return -ENXIO;

        q = bdev_get_queue(bdev);
        if (!q)
                return -ENXIO;

        /*
         * some block devices may not have their queue correctly set up here
         * (e.g. loop device without a backing file) and so issuing a flush
         * here will panic. Ensure there is a request function before issuing
         * the flush.
         */
        if (!q->make_request_fn)
                return -ENXIO;

        bio = bio_alloc(gfp_mask, 0);
        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

        ret = submit_bio_wait(bio);

        /*
         * The driver must store the error location in ->bi_sector, if
         * it supports it. For non-stacked drivers, this should be
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
                *error_sector = bio->bi_iter.bi_sector;

        bio_put(bio);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
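
/*
 * Usage sketch (editorial, not from the original file): a caller that just
 * wants previously written data made durable and doesn't care about the
 * error offset can pass a NULL @error_sector, e.g.
 *
 *	int err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
 *	if (err)
 *		pr_warn("cache flush failed: %d\n", err);
 *
 * where bdev is an already opened struct block_device.
 */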

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
                int node, int cmd_size, gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);
        INIT_LIST_HEAD(&fq->flush_data_in_flight);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* bio-based request queues don't have a flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}