linux/block/blk-wbt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1) msec.
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
        rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
        return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
        return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
        return rq->wbt_flags & WBT_READ;
}

enum {
        /*
         * Default setting; we'll scale up (to 75% of QD max) or down
         * (min 1) from here, depending on device stats.
         */
        RWB_DEF_DEPTH   = 16,

        /*
         * 100msec window
         */
        RWB_WINDOW_NSEC         = 100 * 1000 * 1000ULL,

        /*
         * Disregard stats if we don't have at least this many write samples.
         */
        RWB_MIN_WRITE_SAMPLES   = 3,

        /*
         * If we have this number of consecutive windows with not enough
         * information to scale up or down, scale up.
         */
        RWB_UNKNOWN_BUMP        = 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
        return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
                      rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
        if (rwb_enabled(rwb)) {
                const unsigned long cur = jiffies;

                if (cur != *var)
                        *var = cur;
        }
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
        struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

        return time_before(jiffies, wb->dirty_sleep + HZ);
}

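/*
 * Tracked requests are accounted on one of three wait queues: one for
 * kswapd writeback, one for discards, and one for everything else
 * (background buffered writeback).
 */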
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
                                          enum wbt_flags wb_acct)
{
        if (wb_acct & WBT_KSWAPD)
                return &rwb->rq_wait[WBT_RWQ_KSWAPD];
        else if (wb_acct & WBT_DISCARD)
                return &rwb->rq_wait[WBT_RWQ_DISCARD];

        return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++) {
                struct rq_wait *rqw = &rwb->rq_wait[i];

                if (wq_has_sleeper(&rqw->wait))
                        wake_up_all(&rqw->wait);
        }
}

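/*
 * Called when a tracked request is done with: drop the inflight count and
 * wake sleepers once enough slots have freed up below the chosen wake limit.
 * For example, with max_depth = 16 the limits from calc_wb_limits() are
 * wb_normal = 8 and wb_background = 4, so waiters are woken once the queue
 * has drained completely or at least wb_background / 2 = 2 slots are free
 * below the limit.
 */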
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
                         enum wbt_flags wb_acct)
{
        int inflight, limit;

        inflight = atomic_dec_return(&rqw->inflight);

        /*
         * wbt got disabled with IO in flight. Wake up any potential
         * waiters, we don't have to do more than that.
         */
        if (unlikely(!rwb_enabled(rwb))) {
                rwb_wake_all(rwb);
                return;
        }

        /*
         * For discards, our limit is always the background limit. For
         * writes, if the device does write back caching, drop further
         * down before we wake people up.
         */
        if (wb_acct & WBT_DISCARD)
                limit = rwb->wb_background;
        else if (rwb->wc && !wb_recent_wait(rwb))
                limit = 0;
        else
                limit = rwb->wb_normal;

        /*
         * Don't wake anyone up if we are above the normal limit.
         */
        if (inflight && inflight >= limit)
                return;

        if (wq_has_sleeper(&rqw->wait)) {
                int diff = limit - inflight;

                if (!inflight || diff >= rwb->wb_background / 2)
                        wake_up_all(&rqw->wait);
        }
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct rq_wait *rqw;

        if (!(wb_acct & WBT_TRACKED))
                return;

        rqw = get_rq_wait(rwb, wb_acct);
        wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called when a
 * request is merged and then freed, since a merged request never completes
 * on its own.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!wbt_is_tracked(rq)) {
                if (rwb->sync_cookie == rq) {
                        rwb->sync_issue = 0;
                        rwb->sync_cookie = NULL;
                }

                if (wbt_is_read(rq))
                        wb_timestamp(rwb, &rwb->last_comp);
        } else {
                WARN_ON_ONCE(rq == rwb->sync_cookie);
                __wbt_done(rqos, wbt_flags(rq));
        }
        wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
        /*
         * We need at least one read sample, and a minimum of
         * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
         * that it's writes impacting us, and not just some sole read on
         * a device that is in a lower power state.
         */
        return (stat[READ].nr_samples >= 1 &&
                stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
        u64 now, issue = READ_ONCE(rwb->sync_issue);

        if (!issue || !rwb->sync_cookie)
                return 0;

        now = ktime_to_ns(ktime_get());
        return now - issue;
}

enum {
        LAT_OK = 1,
        LAT_UNKNOWN,
        LAT_UNKNOWN_WRITES,
        LAT_EXCEEDED,
};

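/*
 * Compare the window's stats against the latency target and return one of
 * four verdicts: LAT_EXCEEDED (reads were too slow, or a sync read has been
 * stuck for longer than the window), LAT_OK (read latencies within target),
 * LAT_UNKNOWN_WRITES (no valid read/write mix, but writes are clearly
 * active), or LAT_UNKNOWN (not enough data either way).
 */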
static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;
        u64 thislat;

        /*
         * If our stored sync issue exceeds the window size, or it
         * exceeds our min target AND we haven't logged any entries,
         * flag the latency as exceeded. wbt works off completion latencies,
         * but for a flooded device, a single sync IO can take a long time
         * to complete after being issued. If this time exceeds our
         * monitoring window AND we didn't see any other completions in that
         * window, then count that sync IO as a violation of the latency.
         */
        thislat = rwb_sync_issue_lat(rwb);
        if (thislat > rwb->cur_win_nsec ||
            (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
                trace_wbt_lat(bdi, thislat);
                return LAT_EXCEEDED;
        }

        /*
         * If the stats aren't valid, we don't have a usable read/write mix
         */
        if (!stat_sample_valid(stat)) {
                /*
                 * If we had writes in this stat window and the window is
                 * current, we're only doing writes. If a task recently
                 * waited or still has writes in flight, consider us doing
                 * just writes as well.
                 */
                if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
                    wbt_inflight(rwb))
                        return LAT_UNKNOWN_WRITES;
                return LAT_UNKNOWN;
        }

        /*
         * If the 'min' latency exceeds our target, step down.
         */
        if (stat[READ].min > rwb->min_lat_nsec) {
                trace_wbt_lat(bdi, stat[READ].min);
                trace_wbt_stat(bdi, stat);
                return LAT_EXCEEDED;
        }

        if (rqd->scale_step)
                trace_wbt_stat(bdi, stat);

        return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
        struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
        struct rq_depth *rqd = &rwb->rq_depth;

        trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
                        rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

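/*
 * Derive the two write limits from the current max depth: wb_normal is
 * roughly half of max_depth and wb_background roughly a quarter. E.g. with
 * max_depth = 16 this gives wb_normal = 8 and wb_background = 4. A zero
 * min_lat_nsec disables throttling entirely, and very shallow depths
 * (<= 2) keep the background limit at 1.
 */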
static void calc_wb_limits(struct rq_wb *rwb)
{
        if (rwb->min_lat_nsec == 0) {
                rwb->wb_normal = rwb->wb_background = 0;
        } else if (rwb->rq_depth.max_depth <= 2) {
                rwb->wb_normal = rwb->rq_depth.max_depth;
                rwb->wb_background = 1;
        } else {
                rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
                rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
        }
}

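/*
 * scale_up()/scale_down() adjust the shared rq_depth scaling step and then
 * recompute the wbt limits. Scaling up also wakes all waiters, since the
 * limits just became more generous.
 */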
static void scale_up(struct rq_wb *rwb)
{
        if (!rq_depth_scale_up(&rwb->rq_depth))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
        rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
        if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
                return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, tracepoint_string("scale down"));
}

static void rwb_arm_timer(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        if (rqd->scale_step > 0) {
                /*
                 * We should speed this up, using some variant of a fast
                 * integer inverse square root calculation. Since we only do
                 * this for every window expiration, it's not a huge deal,
                 * though.
                 */
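                /*
                 * (win_nsec << 4) / int_sqrt((step + 1) << 8) is a fixed
                 * point version of win_nsec / sqrt(step + 1), since
                 * int_sqrt((step + 1) * 256) == 16 * sqrt(step + 1). With
                 * the default 100msec window this gives roughly 70msec at
                 * step 1 and 50msec at step 3.
                 */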
                rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
                                        int_sqrt((rqd->scale_step + 1) << 8));
        } else {
                /*
                 * For step < 0, we don't want to increase/decrease the
                 * window size.
                 */
                rwb->cur_win_nsec = rwb->win_nsec;
        }

        blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
        struct rq_wb *rwb = cb->data;
        struct rq_depth *rqd = &rwb->rq_depth;
        unsigned int inflight = wbt_inflight(rwb);
        int status;

        status = latency_exceeded(rwb, cb->stat);

        trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
                        inflight);

        /*
         * If we exceeded the latency target, step down. If we did not,
         * step one level up. If we don't know enough to say either exceeded
         * or ok, then don't do anything.
         */
        switch (status) {
        case LAT_EXCEEDED:
                scale_down(rwb, true);
                break;
        case LAT_OK:
                scale_up(rwb);
                break;
        case LAT_UNKNOWN_WRITES:
                /*
                 * We started at the center step, and while we don't have
                 * a valid read/write sample, we do have writes going on.
                 * Allow the step to go negative, to increase write perf.
                 */
                scale_up(rwb);
                break;
        case LAT_UNKNOWN:
                if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
                        break;
                /*
                 * We get here when we previously scaled the depth away from
                 * the default, and we currently don't have a valid
                 * read/write sample. For that case, slowly return to the
                 * center state (step == 0).
                 */
                if (rqd->scale_step > 0)
                        scale_up(rwb);
                else if (rqd->scale_step < 0)
                        scale_down(rwb, false);
                break;
        default:
                break;
        }

        /*
         * Re-arm the timer if we still have IO in flight, or if we are
         * scaled away from the default depth and need to keep monitoring.
         */
        if (rqd->scale_step || inflight)
                rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
        struct rq_depth *rqd = &rwb->rq_depth;

        rqd->scale_step = 0;
        rqd->scaled_max = false;

        rq_depth_calc_max_depth(rqd);
        calc_wb_limits(rwb);

        rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return 0;
        return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (!rqos)
                return;
        RQWB(rqos)->min_lat_nsec = val;
        RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
        wbt_update_limits(RQWB(rqos));
}

static bool close_io(struct rq_wb *rwb)
{
        const unsigned long now = jiffies;

        return time_before(now, rwb->last_issue + HZ / 10) ||
                time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO      (REQ_SYNC | REQ_META | REQ_PRIO)

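/*
 * Pick the depth limit a new request must stay below. Discards always use
 * the background limit. Sync/meta/prio writes, writes from kswapd, and
 * writes issued while a task was recently throttled in
 * balance_dirty_pages() get the full max_depth. Background writes, or
 * writes issued within ~100ms of other (read) IO, are held to the
 * background limit; everything else uses the normal limit.
 */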
static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
        unsigned int limit;

        /*
         * If we got disabled, just return UINT_MAX. This ensures that
         * we'll properly inc a new IO, and dec+wakeup at the end.
         */
        if (!rwb_enabled(rwb))
                return UINT_MAX;

        if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
                return rwb->wb_background;

        /*
         * At this point we know it's a buffered write. If this is
         * kswapd trying to free memory, or REQ_SYNC is set, then
         * it's WB_SYNC_ALL writeback, and we'll use the max limit for
         * that. If the write is marked as a background write, then use
         * the idle limit, or go to normal if we haven't had competing
         * IO for a bit.
         */
        if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
                limit = rwb->rq_depth.max_depth;
        else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
                /*
                 * If less than 100ms since we completed unrelated IO,
                 * limit us to the background depth (roughly half of the
                 * normal limit) for background writeback.
                 */
                limit = rwb->wb_background;
        } else
                limit = rwb->wb_normal;

        return limit;
}

struct wbt_wait_data {
        struct rq_wb *rwb;
        enum wbt_flags wb_acct;
        unsigned long rw;
};

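/*
 * Callbacks for rq_qos_wait(): wbt_inflight_cb tries to take an inflight
 * slot while staying below the current limit, and wbt_cleanup_cb gives an
 * inflight slot back (via wbt_rqw_done()) if the generic wait code ends up
 * holding a token it no longer needs.
 */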
static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        struct wbt_wait_data *data = private_data;
        wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                       unsigned long rw)
{
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
                .rwb = rwb,
                .wb_acct = wb_acct,
                .rw = rw,
        };

        rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

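/*
 * Only regular buffered writes and discards are throttled: reads never are,
 * and neither are writes carrying both REQ_SYNC and REQ_IDLE (WRITE_ODIRECT).
 */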
static inline bool wbt_should_throttle(struct bio *bio)
{
        switch (bio_op(bio)) {
        case REQ_OP_WRITE:
                /*
                 * Don't throttle WRITE_ODIRECT
                 */
                if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
                    (REQ_SYNC | REQ_IDLE))
                        return false;
                fallthrough;
        case REQ_OP_DISCARD:
                return true;
        default:
                return false;
        }
}

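/*
 * Translate a bio into wbt flags: reads get WBT_READ so their issue and
 * completion times can be tracked, while throttled writes get WBT_TRACKED
 * plus WBT_KSWAPD and/or WBT_DISCARD to select the wait queue that accounts
 * them.
 */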
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
        enum wbt_flags flags = 0;

        if (!rwb_enabled(rwb))
                return 0;

        if (bio_op(bio) == REQ_OP_READ) {
                flags = WBT_READ;
        } else if (wbt_should_throttle(bio)) {
                if (current_is_kswapd())
                        flags |= WBT_KSWAPD;
                if (bio_op(bio) == REQ_OP_DISCARD)
                        flags |= WBT_DISCARD;
                flags |= WBT_TRACKED;
        }
        return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
        __wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. The sleeping is
 * handled by rq_qos_wait(), which blocks until we can get below the
 * current limit for this type of write.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;

        flags = bio_to_wbt_flags(rwb, bio);
        if (!(flags & WBT_TRACKED)) {
                if (flags & WBT_READ)
                        wb_timestamp(rwb, &rwb->last_issue);
                return;
        }

        __wbt_wait(rwb, flags, bio->bi_opf);

        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        struct rq_wb *rwb = RQWB(rqos);
        rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);

        if (!rwb_enabled(rwb))
                return;

        /*
         * Track the oldest outstanding read issue, so we can react more
         * quickly if it takes a long time to complete. Note that this is
         * just a hint. The request can go away when it completes, so it's
         * important we never dereference it. We only use the address to
         * compare with, which is why we store the sync_issue time locally.
         */
        if (wbt_is_read(rq) && !rwb->sync_issue) {
                rwb->sync_cookie = rq;
                rwb->sync_issue = rq->io_start_time_ns;
        }
}

static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
        struct rq_wb *rwb = RQWB(rqos);
        if (!rwb_enabled(rwb))
                return;
        if (rq == rwb->sync_cookie) {
                rwb->sync_issue = 0;
                rwb->sync_cookie = NULL;
        }
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        if (rqos)
                RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);

        /* Throttling already enabled? */
        if (rqos) {
                if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                return;
        }

        /* Queue not registered? Maybe shutting down... */
        if (!blk_queue_registered(q))
                return;

        if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
                wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
        /*
         * We default to 2msec for non-rotational storage, and 75msec
         * for rotational storage.
         */
        if (blk_queue_nonrot(q))
                return 2000000ULL;
        else
                return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
        const int op = req_op(rq);

        if (op == REQ_OP_READ)
                return READ;
        else if (op_is_write(op))
                return WRITE;

        /* don't account */
        return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
        RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
        wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
        struct rq_wb *rwb = RQWB(rqos);
        struct request_queue *q = rqos->q;

        blk_stat_remove_callback(q, rwb->cb);
        blk_stat_free_callback(rwb->cb);
        kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
        struct rq_qos *rqos = wbt_rq_qos(q);
        struct rq_wb *rwb;
        if (!rqos)
                return;
        rwb = RQWB(rqos);
        if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
                blk_stat_deactivate(rwb->cb);
                rwb->enable_state = WBT_STATE_OFF_DEFAULT;
        }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

#ifdef CONFIG_BLK_DEBUG_FS
static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->cur_win_nsec);
        return 0;
}

static int wbt_enabled_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%d\n", rwb->enable_state);
        return 0;
}

static int wbt_id_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;

        seq_printf(m, "%u\n", rqos->id);
        return 0;
}

static int wbt_inflight_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);
        int i;

        for (i = 0; i < WBT_NUM_RWQ; i++)
                seq_printf(m, "%d: inflight %d\n", i,
                           atomic_read(&rwb->rq_wait[i].inflight));
        return 0;
}

static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%llu\n", rwb->min_lat_nsec);
        return 0;
}

static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->unknown_cnt);
        return 0;
}

static int wbt_normal_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_normal);
        return 0;
}

static int wbt_background_show(void *data, struct seq_file *m)
{
        struct rq_qos *rqos = data;
        struct rq_wb *rwb = RQWB(rqos);

        seq_printf(m, "%u\n", rwb->wb_background);
        return 0;
}

static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
        {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
        {"enabled", 0400, wbt_enabled_show},
        {"id", 0400, wbt_id_show},
        {"inflight", 0400, wbt_inflight_show},
        {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
        {"unknown_cnt", 0400, wbt_unknown_cnt_show},
        {"wb_normal", 0400, wbt_normal_show},
        {"wb_background", 0400, wbt_background_show},
        {},
};
#endif

static struct rq_qos_ops wbt_rqos_ops = {
        .throttle = wbt_wait,
        .issue = wbt_issue,
        .track = wbt_track,
        .requeue = wbt_requeue,
        .done = wbt_done,
        .cleanup = wbt_cleanup,
        .queue_depth_changed = wbt_queue_depth_changed,
        .exit = wbt_exit,
#ifdef CONFIG_BLK_DEBUG_FS
        .debugfs_attrs = wbt_debugfs_attrs,
#endif
};

int wbt_init(struct request_queue *q)
{
        struct rq_wb *rwb;
        int i;

        rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
        if (!rwb)
                return -ENOMEM;

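        /*
         * Two stat buckets (READ and WRITE), selected by wbt_data_dir();
         * wb_timer_fn() runs when the armed stats window expires.
         */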
        rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
        if (!rwb->cb) {
                kfree(rwb);
                return -ENOMEM;
        }

        for (i = 0; i < WBT_NUM_RWQ; i++)
                rq_wait_init(&rwb->rq_wait[i]);

        rwb->rqos.id = RQ_QOS_WBT;
        rwb->rqos.ops = &wbt_rqos_ops;
        rwb->rqos.q = q;
        rwb->last_comp = rwb->last_issue = jiffies;
        rwb->win_nsec = RWB_WINDOW_NSEC;
        rwb->enable_state = WBT_STATE_ON_DEFAULT;
        rwb->wc = 1;
        rwb->rq_depth.default_depth = RWB_DEF_DEPTH;

        /*
         * Assign rwb and add the stats callback.
         */
        rq_qos_add(q, &rwb->rqos);
        blk_stat_add_callback(q, rwb->cb);

        rwb->min_lat_nsec = wbt_default_latency_nsec(q);

        wbt_queue_depth_changed(&rwb->rqos);
        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

        return 0;
}