linux/block/blk-wbt.c
/*
 * buffered writeback throttling. loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100 / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
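 *
 * For example, at scaling step 3 the monitoring window becomes
 * 100 / sqrt(3 + 1) = 50 msec (see rwb_arm_timer()).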
 *
 * Copyright (C) 2016 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting, we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats
	 */
	RWB_DEF_DEPTH	= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats, if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows with not enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

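/*
 * Return the rq_wait queue this IO should be accounted against: kswapd
 * writeback, discards, or regular background writeback.
 */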
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

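/*
 * A tracked IO completed: drop the inflight count for its wait queue and,
 * if we are back under the relevant limit, wake up throttled submitters.
 */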
static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
			 enum wbt_flags wb_acct)
{
	int inflight, limit;

	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters, we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background. For writes, if
	 * the device does write back caching, drop further down before we
	 * wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up_all(&rqw->wait);
	}
}

static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	wbt_rqw_done(rwb, rqw, wb_acct);
}

/*
 * Called on completion of a request. Note that it's also called for a
 * merged request, when that request gets freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

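/*
 * How long the currently tracked sync request has been in flight, in
 * nanoseconds. Returns 0 if we aren't tracking one.
 */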
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if stat isn't valid
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

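/*
 * Derive the normal and background inflight limits from the current max
 * depth: roughly half and a quarter of max_depth, with special cases for
 * very shallow depths and for min_lat_nsec == 0 (throttling disabled).
 */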
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

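/*
 * Loosen the queue depth by one scaling step, recompute the limits, and
 * wake up anyone who was throttled against the old, tighter limits.
 */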
static void scale_up(struct rq_wb *rwb)
{
	if (!rq_depth_scale_up(&rwb->rq_depth))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, tracepoint_string("scale up"));
}

static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
		return;
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, tracepoint_string("scale down"));
}

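/*
 * (Re)arm the stats window timer. When we are throttled (scale_step > 0),
 * shrink the monitoring window by a factor of sqrt(scale_step + 1), matching
 * the description at the top of this file.
 */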
static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, but don't have a valid
		 * read/write sample, though we do have writes going on.
		 * Allow the step to go negative, to increase write perf.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

static void wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	wbt_update_limits(RQWB(rqos));
}

static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

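/*
 * Pick the inflight limit to enforce for this IO, based on what kind of
 * write it is and how recently we have seen competing IO.
 */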
static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

struct wbt_wait_data {
	struct rq_wb *rwb;
	enum wbt_flags wb_acct;
	unsigned long rw;
};

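/*
 * rq_qos_wait() uses these callbacks: wbt_inflight_cb() attempts to take an
 * inflight slot below the current limit, and wbt_cleanup_cb() drops one
 * again via wbt_rqw_done().
 */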
static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
}

static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	struct wbt_wait_data *data = private_data;
	wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	struct wbt_wait_data data = {
		.rwb = rwb,
		.wb_acct = wb_acct,
		.rw = rw,
	};

	rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}

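/*
 * Decide whether this bio is subject to throttling: buffered/background
 * writes and discards are, O_DIRECT writes and everything else are not.
 */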
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

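/*
 * Translate a bio into wbt accounting flags. WBT_TRACKED marks IO that is
 * throttled and must release its inflight slot on completion.
 */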
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Called from the
 * rq_qos throttle hook at bio submission time; blocks the submitter until
 * the IO fits under the current inflight limit.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	__wbt_wait(rwb, flags, bio->bi_opf);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track sync issue, in case it takes a long time to complete. Allows
	 * us to react more quickly if a sync IO is slow to finish. Note that
	 * this is just a hint. The request can go away when it completes, so
	 * it's important we never dereference it. We only use the address to
	 * compare with, which is why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!blk_queue_registered(q))
		return;

	if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

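/*
 * Map a request to the stats bucket it is accounted in: READ, WRITE, or
 * -1 for requests we don't account at all.
 */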
static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_queue_depth_changed(struct rq_qos *rqos)
{
	RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
	wbt_update_limits(RQWB(rqos));
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
		blk_stat_deactivate(rwb->cb);
		rwb->wb_normal = 0;
	}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.queue_depth_changed = wbt_queue_depth_changed,
	.exit = wbt_exit,
};

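/*
 * Allocate and register wbt for a queue: set up the per-class wait queues,
 * the default depth and latency target, and the stats callback that drives
 * the scaling timer.
 */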
int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_queue_depth_changed(&rwb->rqos);
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}