linux/block/blk-wbt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
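
/*
 * An illustrative walk-through of the scheme above, using the default
 * 100ms window defined further down:
 *
 * - Window 1: the minimum read latency exceeds the target. scale_step goes
 *   0 -> 1, the allowed write depth is halved, and the next window is
 *   roughly 100 / sqrt(2) ~= 70ms.
 * - Window 2: still too slow. scale_step goes 1 -> 2, the depth is halved
 *   again, and the window shrinks to roughly 100 / sqrt(3) ~= 57ms.
 * - Window 3: latencies look good. scale_step drops back to 1, and the
 *   depth and window grow again, eventually settling at step 0 / 100ms.
 * - If only writes are seen, the step may go negative, boosting the write
 *   depth until reads show up or the writers finish, at which point it
 *   snaps back to step 0.
 */
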
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}

enum {
        /*
         * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
         * from here depending on device stats
         */
        RWB_DEF_DEPTH   = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC         = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats, if we don't meet this minimum
         */
        RWB_MIN_WRITE_SAMPLES   = 3,

        /*
         * If we have this number of consecutive windows with not enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP        = 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}

static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}
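
/*
 * Example of the resulting split: tracked IO issued by kswapd lands on the
 * WBT_RWQ_KSWAPD queue (even if it is a discard, since WBT_KSWAPD is
 * checked first), other discards land on WBT_RWQ_DISCARD, and regular
 * buffered writeback lands on WBT_RWQ_BG. Each queue throttles and wakes
 * independently, using its own inflight count.
 */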

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * wbt got disabled with IO in flight. Wake up any potential
         * waiters; we don't have to do more than that.
         */
        if (unlikely(!rwb_enabled(rwb))) {
                rwb_wake_all(rwb);
                return;
        }

        /*
         * For discards, our limit is always the background. For writes, if
         * the device does write back caching, drop further down before we
         * wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}
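
/*
 * A worked example of the wake logic above, assuming wb_normal = 8 and
 * wb_background = 4:
 *
 * - A completing discard uses limit = wb_background = 4.
 * - A completing write on a device with a write back cache, when no task
 *   was recently throttled in balance_dirty_pages(), uses limit = 0:
 *   waiters are only woken once inflight drops all the way to zero.
 * - Otherwise limit = wb_normal = 8. If inflight drops to 7, diff = 1 is
 *   below wb_background / 2 = 2, so we hold off and batch the wakeup.
 *   Once inflight drops to 6 or below (diff >= 2), sleepers are woken.
 */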

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at which point the request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (rwb->sync_cookie == rq) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(rq))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }
        wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}
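
/*
 * Example: a window with 1 read and 3 writes is a valid sample; a window
 * with 4 reads and only 2 writes is not, since with fewer than
 * RWB_MIN_WRITE_SAMPLES writes we can't tell whether writeback is what is
 * hurting the reads.
 */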

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * No read/write mix, if stat isn't valid
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}
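
/*
 * Summarizing the outcomes above, with the 2ms non-rotational default
 * target and a 100ms window: a tracked sync read that has been outstanding
 * for 150ms trips the first check and returns LAT_EXCEEDED even without
 * any completions in the window. A window with writes but no read samples
 * returns LAT_UNKNOWN_WRITES. A window with too few samples and no write
 * activity returns LAT_UNKNOWN. A valid window whose minimum read latency
 * is 5ms returns LAT_EXCEEDED; one with a 1ms minimum returns LAT_OK.
 */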

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}
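
/*
 * Example limits produced by the above: with max_depth = 16, wb_normal is
 * (16 + 1) / 2 = 8 and wb_background is (16 + 3) / 4 = 4. After scaling
 * down to max_depth = 2, normal and background collapse to 2 and 1.
 * A min_lat_nsec of 0 zeroes both limits, which in turn makes
 * rwb_enabled() return false, i.e. throttling is off.
 */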

static void scale_up(struct rq_wb *rwb)
{
        rq_depth_scale_up(&rwb->rq_depth);
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, "scale up");
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, "scale down");
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}
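
/*
 * The fixed-point math above computes win_nsec / sqrt(scale_step + 1):
 * (win_nsec << 4) / int_sqrt((step + 1) << 8) scales both sides by 16 so
 * the integer square root keeps some precision. For example, with a 100ms
 * window and scale_step = 3: int_sqrt(4 << 8) = 32, so cur_win_nsec =
 * (100ms * 16) / 32 = 50ms, i.e. 100ms / sqrt(4).
 */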

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
                        inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, but don't have a valid
                 * read/write sample, though we do have writes going on.
                 * Allow step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth up or
                 * down, and we currently don't have a valid read/write
                 * sample. For that case, slowly return to center state
                 * (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm timer, if we have IO in flight
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}
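
/*
 * Example of how the timer state machine behaves: a flooded device keeps
 * returning LAT_EXCEEDED, so each window halves the depth and shrinks the
 * window, down to a minimum depth of 1. When reads recover, LAT_OK windows
 * walk scale_step back toward 0. A long stretch of windows without usable
 * samples (LAT_UNKNOWN) nudges the step back toward 0 only once every
 * RWB_UNKNOWN_BUMP (5) windows, and the timer is simply left unarmed once
 * the step is 0 and nothing is in flight.
 */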

static void __wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;
        __wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return 0;
        return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;
        RQWB(rqos)->min_lat_nsec = val;
        RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        __wbt_update_limits(RQWB(rqos));
}
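
/*
 * wbt_get_min_lat() and wbt_set_min_lat() back the per-queue sysfs knob
 * (wbt_lat_usec, handled in blk-sysfs.c, value in microseconds), so for
 * example:
 *
 *	# cat /sys/block/sda/queue/wbt_lat_usec
 *	75000
 *	# echo 2000 > /sys/block/sda/queue/wbt_lat_usec
 *
 * switches a rotational device from the 75ms default to a 2ms target and
 * marks the state WBT_STATE_ON_MANUAL, which wbt_disable_default() below
 * treats as a user decision it should not override. A value of 0 ends up
 * disabling throttling, since calc_wb_limits() then zeroes the limits.
 */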

static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
        unsigned int limit;

        /*
         * If we got disabled, just return UINT_MAX. This ensures that
         * we'll properly inc a new IO, and dec+wakeup at the end.
         */
        if (!rwb_enabled(rwb))
                return UINT_MAX;

        if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we completed unrelated IO,
                 * limit us to half the depth for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}
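
/*
 * Putting get_limit() together with an assumed max_depth of 16 (so
 * wb_normal = 8 and wb_background = 4): a discard may have up to 4
 * inflight; a write from kswapd, a write carrying REQ_SYNC, REQ_META or
 * REQ_PRIO, or any write issued while a task was recently dirty-throttled
 * gets the full 16; a REQ_BACKGROUND write, or any write issued within
 * ~100ms of unrelated IO, is held to 4; everything else gets 8. If wbt is
 * disabled the limit is UINT_MAX, so the inflight accounting still
 * balances.
 */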

struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        unsigned long rw;
};

static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       unsigned long rw)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .rw = rw,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
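
/*
 * Roughly, rq_qos_wait() (in blk-rq-qos.c) uses the two callbacks along
 * the lines of the simplified sketch below; its fairness and wakeup
 * handling are omitted here:
 *
 *	if (wbt_inflight_cb(rqw, &data))
 *		return;				// below the limit, proceed
 *	do {
 *		sleep on rqw->wait;		// throttled
 *	} while (we have not been granted or grabbed an inflight slot);
 *
 * wbt_cleanup_cb() is used by that machinery to give back an inflight
 * slot, via wbt_rqw_done(), if the waiter ends up holding one more than
 * it needs.
 */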

static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                /* fallthrough */
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}
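
/*
 * In practice this means: an O_DIRECT style write (REQ_SYNC and REQ_IDLE
 * both set) is left alone, a buffered writeback write (either flag
 * missing) is throttled, discards are throttled, and everything else
 * passes through. Reads are never throttled here, though they are still
 * tracked for latency via WBT_READ in bio_to_wbt_flags() below.
 */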

static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(rwb, bio)) {
                if (current_is_kswapd())
                        flags |= WBT_KSWAPD;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }
        return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
        __wbt_done(rqos, flags);
}
/*
 * May sleep, if we have exceeded the writeback limits. Only tracked
 * writes and discards are throttled here; untracked reads just have
 * their issue time recorded, for use by close_io().
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track sync issue, in case it takes a long time to complete.
         * Allows us to react more quickly if a sync IO turns out to be
         * slow. Note that this is just a hint. The request can go away
         * when it completes, so it's important we never dereference it.
         * We only use the address to compare with, which is why we store
         * the sync_issue time locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);
        if (!rwb_enabled(rwb))
                return;
        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (rqos) {
                RQWB(rqos)->rq_depth.queue_depth = depth;
                __wbt_update_limits(RQWB(rqos));
        }
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (rqos)
                RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        /* Throttling already enabled? */
        if (rqos)
                return;

        /* Queue not registered? Maybe shutting down... */
        if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
                return;

        if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
                wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
        const int op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct request_queue *q = rqos->q;

        blk_stat_remove_callback(q, rwb->cb);
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        struct rq_wb *rwb;
        if (!rqos)
                return;
        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->wb_normal = 0;
        }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
        struct rq_wb *rwb;
        int i;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->rqos.id = RQ_QOS_WBT;
        rwb->rqos.ops = &wbt_rqos_ops;
        rwb->rqos.q = q;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = 1;
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
        __wbt_update_limits(rwb);

        /*
         * Assign rwb and add the stats callback.
         */
        rq_qos_add(q, &rwb->rqos);
        blk_stat_add_callback(q, rwb->cb);

        rwb->min_lat_nsec = wbt_default_latency_nsec(q);

        wbt_set_queue_depth(q, blk_queue_depth(q));
        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

        return 0;
}