linux/block/blk-throttle.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Interface for controlling IO bandwidth on a request queue
   4 *
   5 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   6 */
   7
   8#include <linux/module.h>
   9#include <linux/slab.h>
  10#include <linux/blkdev.h>
  11#include <linux/bio.h>
  12#include <linux/blktrace_api.h>
  13#include <linux/blk-cgroup.h>
  14#include "blk.h"
  15#include "blk-cgroup-rwstat.h"
  16
  17/* Max dispatch from a group in 1 round */
  18static int throtl_grp_quantum = 8;
  19
  20/* Total max dispatch from all groups in one round */
  21static int throtl_quantum = 32;
  22
   23/* Throttling is performed over a slice; once it is used up, the slice is renewed */
  24#define DFL_THROTL_SLICE_HD (HZ / 10)
  25#define DFL_THROTL_SLICE_SSD (HZ / 50)
  26#define MAX_THROTL_SLICE (HZ)
  27#define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
  28#define MIN_THROTL_BPS (320 * 1024)
  29#define MIN_THROTL_IOPS (10)
  30#define DFL_LATENCY_TARGET (-1L)
  31#define DFL_IDLE_THRESHOLD (0)
  32#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
  33#define LATENCY_FILTERED_SSD (0)
  34/*
  35 * For HD, very small latency typically comes from sequential IO. Such IO doesn't
  36 * help determine whether a cgroup's IO is being impacted by others, so we ignore it.
  37 */
  38#define LATENCY_FILTERED_HD (1000L) /* 1ms */
  39
  40static struct blkcg_policy blkcg_policy_throtl;
  41
  42/* A workqueue to queue throttle related work */
  43static struct workqueue_struct *kthrotld_workqueue;
  44
  45/*
  46 * To implement hierarchical throttling, throtl_grps form a tree and bios
  47 * are dispatched upwards level by level until they reach the top and get
  48 * issued.  When dispatching bios from the children and local group at each
  49 * level, if the bios are dispatched into a single bio_list, there's a risk
  50 * that a local or child group which can queue many bios at once fills up
  51 * the list and starves others.
  52 *
  53 * To avoid such starvation, dispatched bios are queued separately
  54 * according to where they came from.  When they are again dispatched to
  55 * the parent, they're popped in round-robin order so that no single source
  56 * hogs the dispatch window.
  57 *
  58 * throtl_qnode is used to keep the queued bios separated by their sources.
  59 * Bios are queued to throtl_qnode which in turn is queued to
  60 * throtl_service_queue and then dispatched in round-robin order.
  61 *
  62 * It's also used to track the reference counts on blkg's.  A qnode always
  63 * belongs to a throtl_grp and gets queued on itself or the parent, so
  64 * incrementing the reference of the associated throtl_grp when a qnode is
  65 * queued and decrementing when dequeued is enough to keep the whole blkg
  66 * tree pinned while bios are in flight.
  67 */
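
    /*
     * Round-robin example (illustrative): if a service_queue has qnodes
     * A(a1, a2), B(b1) and C(c1, c2) queued in that order, successive
     * throtl_pop_queued() calls return a1, b1, c1, a2, c2.  A qnode that
     * still holds bios is rotated to the tail after each pop, and an emptied
     * qnode is removed, dropping the blkg reference it held on its tg.
     */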
  68struct throtl_qnode {
  69        struct list_head        node;           /* service_queue->queued[] */
  70        struct bio_list         bios;           /* queued bios */
  71        struct throtl_grp       *tg;            /* tg this qnode belongs to */
  72};
  73
  74struct throtl_service_queue {
  75        struct throtl_service_queue *parent_sq; /* the parent service_queue */
  76
  77        /*
  78         * Bios queued directly to this service_queue or dispatched from
  79         * children throtl_grp's.
  80         */
  81        struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
  82        unsigned int            nr_queued[2];   /* number of queued bios */
  83
  84        /*
  85         * RB tree of active children throtl_grp's, which are sorted by
  86         * their ->disptime.
  87         */
  88        struct rb_root_cached   pending_tree;   /* RB tree of active tgs */
  89        unsigned int            nr_pending;     /* # queued in the tree */
  90        unsigned long           first_pending_disptime; /* disptime of the first tg */
  91        struct timer_list       pending_timer;  /* fires on first_pending_disptime */
  92};
  93
  94enum tg_state_flags {
  95        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
  96        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
  97};
  98
  99#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
 100
 101enum {
 102        LIMIT_LOW,
 103        LIMIT_MAX,
 104        LIMIT_CNT,
 105};
 106
 107struct throtl_grp {
 108        /* must be the first member */
 109        struct blkg_policy_data pd;
 110
 111        /* active throtl group service_queue member */
 112        struct rb_node rb_node;
 113
 114        /* throtl_data this group belongs to */
 115        struct throtl_data *td;
 116
 117        /* this group's service queue */
 118        struct throtl_service_queue service_queue;
 119
 120        /*
 121         * qnode_on_self is used when bios are directly queued to this
 122         * throtl_grp so that local bios compete fairly with bios
 123         * dispatched from children.  qnode_on_parent is used when bios are
 124         * dispatched from this throtl_grp into its parent and will compete
 125         * with the sibling qnode_on_parents and the parent's
 126         * qnode_on_self.
 127         */
 128        struct throtl_qnode qnode_on_self[2];
 129        struct throtl_qnode qnode_on_parent[2];
 130
 131        /*
 132         * Dispatch time in jiffies. This is the estimated time when the group
 133         * will be unthrottled and ready to dispatch more bios. It is used as
 134         * the key to sort active groups in the service tree.
 135         */
 136        unsigned long disptime;
 137
 138        unsigned int flags;
 139
 140        /* are there any throtl rules between this group and td? */
 141        bool has_rules[2];
 142
 143        /* internally used bytes per second rate limits */
 144        uint64_t bps[2][LIMIT_CNT];
 145        /* user configured bps limits */
 146        uint64_t bps_conf[2][LIMIT_CNT];
 147
 148        /* internally used IOPS limits */
 149        unsigned int iops[2][LIMIT_CNT];
 150        /* user configured IOPS limits */
 151        unsigned int iops_conf[2][LIMIT_CNT];
 152
 153        /* Number of bytes dispatched in current slice */
 154        uint64_t bytes_disp[2];
 155        /* Number of bios dispatched in current slice */
 156        unsigned int io_disp[2];
 157
 158        unsigned long last_low_overflow_time[2];
 159
 160        uint64_t last_bytes_disp[2];
 161        unsigned int last_io_disp[2];
 162
 163        unsigned long last_check_time;
 164
 165        unsigned long latency_target; /* us */
 166        unsigned long latency_target_conf; /* us */
 167        /* When did we start a new slice */
 168        unsigned long slice_start[2];
 169        unsigned long slice_end[2];
 170
 171        unsigned long last_finish_time; /* ns / 1024 */
 172        unsigned long checked_last_finish_time; /* ns / 1024 */
 173        unsigned long avg_idletime; /* ns / 1024 */
 174        unsigned long idletime_threshold; /* us */
 175        unsigned long idletime_threshold_conf; /* us */
 176
 177        unsigned int bio_cnt; /* total bios */
 178        unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
 179        unsigned long bio_cnt_reset_time;
 180
 181        struct blkg_rwstat stat_bytes;
 182        struct blkg_rwstat stat_ios;
 183};
 184
 185/* We measure latency for request size from <= 4k to >= 1M */
 186#define LATENCY_BUCKET_SIZE 9
 187
 188struct latency_bucket {
 189        unsigned long total_latency; /* ns / 1024 */
 190        int samples;
 191};
 192
 193struct avg_latency_bucket {
 194        unsigned long latency; /* ns / 1024 */
 195        bool valid;
 196};
 197
 198struct throtl_data
 199{
 200        /* service tree for active throtl groups */
 201        struct throtl_service_queue service_queue;
 202
 203        struct request_queue *queue;
 204
 205        /* Total Number of queued bios on READ and WRITE lists */
 206        unsigned int nr_queued[2];
 207
 208        unsigned int throtl_slice;
 209
 210        /* Work for dispatching throttled bios */
 211        struct work_struct dispatch_work;
 212        unsigned int limit_index;
 213        bool limit_valid[LIMIT_CNT];
 214
 215        unsigned long low_upgrade_time;
 216        unsigned long low_downgrade_time;
 217
 218        unsigned int scale;
 219
 220        struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
 221        struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
 222        struct latency_bucket __percpu *latency_buckets[2];
 223        unsigned long last_calculate_time;
 224        unsigned long filtered_latency;
 225
 226        bool track_bio_latency;
 227};
 228
 229static void throtl_pending_timer_fn(struct timer_list *t);
 230
 231static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 232{
 233        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 234}
 235
 236static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 237{
 238        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 239}
 240
 241static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 242{
 243        return pd_to_blkg(&tg->pd);
 244}
 245
 246/**
 247 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 248 * @sq: the throtl_service_queue of interest
 249 *
 250 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 251 * embedded in throtl_data, %NULL is returned.
 252 */
 253static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
 254{
 255        if (sq && sq->parent_sq)
 256                return container_of(sq, struct throtl_grp, service_queue);
 257        else
 258                return NULL;
 259}
 260
 261/**
 262 * sq_to_td - return throtl_data the specified service queue belongs to
 263 * @sq: the throtl_service_queue of interest
 264 *
 265 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 266 * Determine the associated throtl_data accordingly and return it.
 267 */
 268static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 269{
 270        struct throtl_grp *tg = sq_to_tg(sq);
 271
 272        if (tg)
 273                return tg->td;
 274        else
 275                return container_of(sq, struct throtl_data, service_queue);
 276}
 277
 278/*
 279 * A cgroup's LIMIT_MAX limit is scaled if a low limit is set. This scaling is
 280 * meant to make IO dispatch smoother.
 281 * Scale up: scale up linearly with the time elapsed since the last upgrade.
 282 *           Every throtl_slice, the limit grows by half of the .low limit
 283 *           until it hits the .max limit.
 284 * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit.
 285 */
 286static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
 287{
 288        /* arbitrary value to avoid too big scale */
 289        if (td->scale < 4096 && time_after_eq(jiffies,
 290            td->low_upgrade_time + td->scale * td->throtl_slice))
 291                td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
 292
 293        return low + (low >> 1) * td->scale;
 294}
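
    /*
     * Example (illustrative): with a .low limit of 10 MB/s, once the system has
     * stayed at LIMIT_MAX for 3 * throtl_slice since the last upgrade, td->scale
     * becomes 3 and the adjusted limit is 10 + 5 * 3 = 25 MB/s.  tg_bps_limit()
     * and tg_iops_limit() below clamp the result to the configured .max limit.
     */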
 295
 296static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
 297{
 298        struct blkcg_gq *blkg = tg_to_blkg(tg);
 299        struct throtl_data *td;
 300        uint64_t ret;
 301
 302        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 303                return U64_MAX;
 304
 305        td = tg->td;
 306        ret = tg->bps[rw][td->limit_index];
 307        if (ret == 0 && td->limit_index == LIMIT_LOW) {
 308                /* intermediate node or iops isn't 0 */
 309                if (!list_empty(&blkg->blkcg->css.children) ||
 310                    tg->iops[rw][td->limit_index])
 311                        return U64_MAX;
 312                else
 313                        return MIN_THROTL_BPS;
 314        }
 315
 316        if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
 317            tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
 318                uint64_t adjusted;
 319
 320                adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
 321                ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
 322        }
 323        return ret;
 324}
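
    /*
     * Note on the LIMIT_LOW fallback above: while td->limit_index == LIMIT_LOW,
     * a leaf group with neither a bps nor an iops .low limit for the given
     * direction is still capped, at MIN_THROTL_BPS (320 KB/s), so that groups
     * which do have .low limits can meet them.  Intermediate nodes, and groups
     * whose iops .low limit is set, are left unlimited in bps here and are
     * handled by tg_iops_limit() below instead.
     */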
 325
 326static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 327{
 328        struct blkcg_gq *blkg = tg_to_blkg(tg);
 329        struct throtl_data *td;
 330        unsigned int ret;
 331
 332        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
 333                return UINT_MAX;
 334
 335        td = tg->td;
 336        ret = tg->iops[rw][td->limit_index];
 337        if (ret == 0 && td->limit_index == LIMIT_LOW) {
 338                /* intermediate node or bps isn't 0 */
 339                if (!list_empty(&blkg->blkcg->css.children) ||
 340                    tg->bps[rw][td->limit_index])
 341                        return UINT_MAX;
 342                else
 343                        return MIN_THROTL_IOPS;
 344        }
 345
 346        if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
 347            tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
 348                uint64_t adjusted;
 349
 350                adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
 351                if (adjusted > UINT_MAX)
 352                        adjusted = UINT_MAX;
 353                ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
 354        }
 355        return ret;
 356}
 357
 358#define request_bucket_index(sectors) \
 359        clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
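
    /*
     * Example (illustrative): a 4 KB request is 8 sectors and order_base_2(8) = 3,
     * so it maps to bucket 0; 64 KB (128 sectors) maps to bucket 7 - 3 = 4; and
     * anything >= 1 MB (2048 sectors and up) is clamped to bucket 8.
     */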
 360
 361/**
 362 * throtl_log - log debug message via blktrace
 363 * @sq: the service_queue being reported
 364 * @fmt: printf format string
 365 * @args: printf args
 366 *
 367 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 368 * throtl_grp; otherwise, just "throtl".
 369 */
 370#define throtl_log(sq, fmt, args...)    do {                            \
 371        struct throtl_grp *__tg = sq_to_tg((sq));                       \
 372        struct throtl_data *__td = sq_to_td((sq));                      \
 373                                                                        \
 374        (void)__td;                                                     \
 375        if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
 376                break;                                                  \
 377        if ((__tg)) {                                                   \
 378                blk_add_cgroup_trace_msg(__td->queue,                   \
 379                        tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
 380        } else {                                                        \
 381                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
 382        }                                                               \
 383} while (0)
 384
 385static inline unsigned int throtl_bio_data_size(struct bio *bio)
 386{
 387        /* assume it's one sector */
 388        if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
 389                return 512;
 390        return bio->bi_iter.bi_size;
 391}
 392
 393static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 394{
 395        INIT_LIST_HEAD(&qn->node);
 396        bio_list_init(&qn->bios);
 397        qn->tg = tg;
 398}
 399
 400/**
 401 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 402 * @bio: bio being added
 403 * @qn: qnode to add bio to
 404 * @queued: the service_queue->queued[] list @qn belongs to
 405 *
 406 * Add @bio to @qn and put @qn on @queued if it's not already on.
 407 * @qn->tg's reference count is bumped when @qn is activated.  See the
 408 * comment on top of throtl_qnode definition for details.
 409 */
 410static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 411                                 struct list_head *queued)
 412{
 413        bio_list_add(&qn->bios, bio);
 414        if (list_empty(&qn->node)) {
 415                list_add_tail(&qn->node, queued);
 416                blkg_get(tg_to_blkg(qn->tg));
 417        }
 418}
 419
 420/**
 421 * throtl_peek_queued - peek the first bio on a qnode list
 422 * @queued: the qnode list to peek
 423 */
 424static struct bio *throtl_peek_queued(struct list_head *queued)
 425{
 426        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 427        struct bio *bio;
 428
 429        if (list_empty(queued))
 430                return NULL;
 431
 432        bio = bio_list_peek(&qn->bios);
 433        WARN_ON_ONCE(!bio);
 434        return bio;
 435}
 436
 437/**
 438 * throtl_pop_queued - pop the first bio from a qnode list
 439 * @queued: the qnode list to pop a bio from
 440 * @tg_to_put: optional out argument for throtl_grp to put
 441 *
 442 * Pop the first bio from the qnode list @queued.  After popping, the first
 443 * qnode is removed from @queued if empty or moved to the end of @queued so
 444 * that the popping order is round-robin.
 445 *
 446 * When the first qnode is removed, its associated throtl_grp should be put
 447 * too.  If @tg_to_put is NULL, this function automatically puts it;
 448 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 449 * responsible for putting it.
 450 */
 451static struct bio *throtl_pop_queued(struct list_head *queued,
 452                                     struct throtl_grp **tg_to_put)
 453{
 454        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 455        struct bio *bio;
 456
 457        if (list_empty(queued))
 458                return NULL;
 459
 460        bio = bio_list_pop(&qn->bios);
 461        WARN_ON_ONCE(!bio);
 462
 463        if (bio_list_empty(&qn->bios)) {
 464                list_del_init(&qn->node);
 465                if (tg_to_put)
 466                        *tg_to_put = qn->tg;
 467                else
 468                        blkg_put(tg_to_blkg(qn->tg));
 469        } else {
 470                list_move_tail(&qn->node, queued);
 471        }
 472
 473        return bio;
 474}
 475
 476/* init a service_queue, assumes the caller zeroed it */
 477static void throtl_service_queue_init(struct throtl_service_queue *sq)
 478{
 479        INIT_LIST_HEAD(&sq->queued[0]);
 480        INIT_LIST_HEAD(&sq->queued[1]);
 481        sq->pending_tree = RB_ROOT_CACHED;
 482        timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
 483}
 484
 485static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
 486                                                struct request_queue *q,
 487                                                struct blkcg *blkcg)
 488{
 489        struct throtl_grp *tg;
 490        int rw;
 491
 492        tg = kzalloc_node(sizeof(*tg), gfp, q->node);
 493        if (!tg)
 494                return NULL;
 495
 496        if (blkg_rwstat_init(&tg->stat_bytes, gfp))
 497                goto err_free_tg;
 498
 499        if (blkg_rwstat_init(&tg->stat_ios, gfp))
 500                goto err_exit_stat_bytes;
 501
 502        throtl_service_queue_init(&tg->service_queue);
 503
 504        for (rw = READ; rw <= WRITE; rw++) {
 505                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 506                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 507        }
 508
 509        RB_CLEAR_NODE(&tg->rb_node);
 510        tg->bps[READ][LIMIT_MAX] = U64_MAX;
 511        tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
 512        tg->iops[READ][LIMIT_MAX] = UINT_MAX;
 513        tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
 514        tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
 515        tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
 516        tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
 517        tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
 518        /* LIMIT_LOW will have default value 0 */
 519
 520        tg->latency_target = DFL_LATENCY_TARGET;
 521        tg->latency_target_conf = DFL_LATENCY_TARGET;
 522        tg->idletime_threshold = DFL_IDLE_THRESHOLD;
 523        tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 524
 525        return &tg->pd;
 526
 527err_exit_stat_bytes:
 528        blkg_rwstat_exit(&tg->stat_bytes);
 529err_free_tg:
 530        kfree(tg);
 531        return NULL;
 532}
 533
 534static void throtl_pd_init(struct blkg_policy_data *pd)
 535{
 536        struct throtl_grp *tg = pd_to_tg(pd);
 537        struct blkcg_gq *blkg = tg_to_blkg(tg);
 538        struct throtl_data *td = blkg->q->td;
 539        struct throtl_service_queue *sq = &tg->service_queue;
 540
 541        /*
 542         * If on the default hierarchy, we switch to properly hierarchical
 543         * behavior where limits on a given throtl_grp are applied to the
 544         * whole subtree rather than just the group itself.  e.g. If 16M
 545         * read_bps limit is set on the root group, the whole system can't
 546         * exceed 16M for the device.
 547         *
 548         * If not on the default hierarchy, the broken flat hierarchy
 549         * behavior is retained where all throtl_grps are treated as if
 550         * they're all separate root groups right below throtl_data.
 551         * Limits of a group don't interact with limits of other groups
 552         * regardless of the position of the group in the hierarchy.
 553         */
 554        sq->parent_sq = &td->service_queue;
 555        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 556                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 557        tg->td = td;
 558}
 559
 560/*
 561 * Set has_rules[] if @tg or any of its parents have limits configured.
 562 * This doesn't require walking up to the top of the hierarchy as the
 563 * parent's has_rules[] is guaranteed to be correct.
 564 */
 565static void tg_update_has_rules(struct throtl_grp *tg)
 566{
 567        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 568        struct throtl_data *td = tg->td;
 569        int rw;
 570
 571        for (rw = READ; rw <= WRITE; rw++)
 572                tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
 573                        (td->limit_valid[td->limit_index] &&
 574                         (tg_bps_limit(tg, rw) != U64_MAX ||
 575                          tg_iops_limit(tg, rw) != UINT_MAX));
 576}
 577
 578static void throtl_pd_online(struct blkg_policy_data *pd)
 579{
 580        struct throtl_grp *tg = pd_to_tg(pd);
 581        /*
 582         * We don't want new groups to escape the limits of their ancestors.
 583         * Update has_rules[] after a new group is brought online.
 584         */
 585        tg_update_has_rules(tg);
 586}
 587
 588static void blk_throtl_update_limit_valid(struct throtl_data *td)
 589{
 590        struct cgroup_subsys_state *pos_css;
 591        struct blkcg_gq *blkg;
 592        bool low_valid = false;
 593
 594        rcu_read_lock();
 595        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
 596                struct throtl_grp *tg = blkg_to_tg(blkg);
 597
 598                if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
 599                    tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
 600                        low_valid = true;
 601                        break;
 602                }
 603        }
 604        rcu_read_unlock();
 605
 606        td->limit_valid[LIMIT_LOW] = low_valid;
 607}
 608
 609static void throtl_upgrade_state(struct throtl_data *td);
 610static void throtl_pd_offline(struct blkg_policy_data *pd)
 611{
 612        struct throtl_grp *tg = pd_to_tg(pd);
 613
 614        tg->bps[READ][LIMIT_LOW] = 0;
 615        tg->bps[WRITE][LIMIT_LOW] = 0;
 616        tg->iops[READ][LIMIT_LOW] = 0;
 617        tg->iops[WRITE][LIMIT_LOW] = 0;
 618
 619        blk_throtl_update_limit_valid(tg->td);
 620
 621        if (!tg->td->limit_valid[tg->td->limit_index])
 622                throtl_upgrade_state(tg->td);
 623}
 624
 625static void throtl_pd_free(struct blkg_policy_data *pd)
 626{
 627        struct throtl_grp *tg = pd_to_tg(pd);
 628
 629        del_timer_sync(&tg->service_queue.pending_timer);
 630        blkg_rwstat_exit(&tg->stat_bytes);
 631        blkg_rwstat_exit(&tg->stat_ios);
 632        kfree(tg);
 633}
 634
 635static struct throtl_grp *
 636throtl_rb_first(struct throtl_service_queue *parent_sq)
 637{
 638        struct rb_node *n;
 639        /* Service tree is empty */
 640        if (!parent_sq->nr_pending)
 641                return NULL;
 642
 643        n = rb_first_cached(&parent_sq->pending_tree);
 644        WARN_ON_ONCE(!n);
 645        if (!n)
 646                return NULL;
 647        return rb_entry_tg(n);
 648}
 649
 650static void throtl_rb_erase(struct rb_node *n,
 651                            struct throtl_service_queue *parent_sq)
 652{
 653        rb_erase_cached(n, &parent_sq->pending_tree);
 654        RB_CLEAR_NODE(n);
 655        --parent_sq->nr_pending;
 656}
 657
 658static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 659{
 660        struct throtl_grp *tg;
 661
 662        tg = throtl_rb_first(parent_sq);
 663        if (!tg)
 664                return;
 665
 666        parent_sq->first_pending_disptime = tg->disptime;
 667}
 668
 669static void tg_service_queue_add(struct throtl_grp *tg)
 670{
 671        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 672        struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
 673        struct rb_node *parent = NULL;
 674        struct throtl_grp *__tg;
 675        unsigned long key = tg->disptime;
 676        bool leftmost = true;
 677
 678        while (*node != NULL) {
 679                parent = *node;
 680                __tg = rb_entry_tg(parent);
 681
 682                if (time_before(key, __tg->disptime))
 683                        node = &parent->rb_left;
 684                else {
 685                        node = &parent->rb_right;
 686                        leftmost = false;
 687                }
 688        }
 689
 690        rb_link_node(&tg->rb_node, parent, node);
 691        rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
 692                               leftmost);
 693}
 694
 695static void __throtl_enqueue_tg(struct throtl_grp *tg)
 696{
 697        tg_service_queue_add(tg);
 698        tg->flags |= THROTL_TG_PENDING;
 699        tg->service_queue.parent_sq->nr_pending++;
 700}
 701
 702static void throtl_enqueue_tg(struct throtl_grp *tg)
 703{
 704        if (!(tg->flags & THROTL_TG_PENDING))
 705                __throtl_enqueue_tg(tg);
 706}
 707
 708static void __throtl_dequeue_tg(struct throtl_grp *tg)
 709{
 710        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 711        tg->flags &= ~THROTL_TG_PENDING;
 712}
 713
 714static void throtl_dequeue_tg(struct throtl_grp *tg)
 715{
 716        if (tg->flags & THROTL_TG_PENDING)
 717                __throtl_dequeue_tg(tg);
 718}
 719
 720/* Call with queue lock held */
 721static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 722                                          unsigned long expires)
 723{
 724        unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 725
 726        /*
 727         * Since we adjust the throttle limit dynamically, the sleep time
 728         * calculated from the previous limit may be invalid. The cgroup's
 729         * sleep time can be very long and, with no other cgroups doing IO,
 730         * nothing would notify it of the limit change, so make sure the
 731         * cgroup doesn't sleep too long and miss the change.
 732         */
 733        if (time_after(expires, max_expire))
 734                expires = max_expire;
 735        mod_timer(&sq->pending_timer, expires);
 736        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 737                   expires - jiffies, jiffies);
 738}
 739
 740/**
 741 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 742 * @sq: the service_queue to schedule dispatch for
 743 * @force: force scheduling
 744 *
 745 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 746 * dispatch time of the first pending child.  Returns %true if either timer
 747 * is armed or there's no pending child left.  %false if the current
 748 * dispatch window is still open and the caller should continue
 749 * dispatching.
 750 *
 751 * If @force is %true, the dispatch timer is always scheduled and this
 752 * function is guaranteed to return %true.  This is to be used when the
 753 * caller can't dispatch itself and needs to invoke pending_timer
 754 * unconditionally.  Note that forced scheduling is likely to induce short
 755 * delay before dispatch starts even if @sq->first_pending_disptime is not
 756 * in the future and thus shouldn't be used in hot paths.
 757 */
 758static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 759                                          bool force)
 760{
 761        /* any pending children left? */
 762        if (!sq->nr_pending)
 763                return true;
 764
 765        update_min_dispatch_time(sq);
 766
 767        /* is the next dispatch time in the future? */
 768        if (force || time_after(sq->first_pending_disptime, jiffies)) {
 769                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 770                return true;
 771        }
 772
 773        /* tell the caller to continue dispatching */
 774        return false;
 775}
 776
 777static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 778                bool rw, unsigned long start)
 779{
 780        tg->bytes_disp[rw] = 0;
 781        tg->io_disp[rw] = 0;
 782
 783        /*
 784         * The previous slice has expired. We must have trimmed it after the
 785         * last bio dispatch, which means the bandwidth since the start of the
 786         * last slice was never used. Try to make use of that bandwidth by
 787         * giving credit.
 788         */
 789        if (time_after_eq(start, tg->slice_start[rw]))
 790                tg->slice_start[rw] = start;
 791
 792        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 793        throtl_log(&tg->service_queue,
 794                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 795                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 796                   tg->slice_end[rw], jiffies);
 797}
 798
 799static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 800{
 801        tg->bytes_disp[rw] = 0;
 802        tg->io_disp[rw] = 0;
 803        tg->slice_start[rw] = jiffies;
 804        tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
 805        throtl_log(&tg->service_queue,
 806                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 807                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 808                   tg->slice_end[rw], jiffies);
 809}
 810
 811static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 812                                        unsigned long jiffy_end)
 813{
 814        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 815}
 816
 817static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 818                                       unsigned long jiffy_end)
 819{
 820        tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
 821        throtl_log(&tg->service_queue,
 822                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 823                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 824                   tg->slice_end[rw], jiffies);
 825}
 826
 827/* Determine if previously allocated or extended slice is complete or not */
 828static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 829{
 830        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 831                return false;
 832
 833        return true;
 834}
 835
 836/* Trim the used slices and adjust slice start accordingly */
 837static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 838{
 839        unsigned long nr_slices, time_elapsed, io_trim;
 840        u64 bytes_trim, tmp;
 841
 842        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 843
 844        /*
 845         * If bps is unlimited (-1), the time slice doesn't get
 846         * renewed. Don't try to trim the slice if it has already expired; a new
 847         * slice will start when appropriate.
 848         */
 849        if (throtl_slice_used(tg, rw))
 850                return;
 851
 852        /*
 853         * A bio has been dispatched, so also adjust slice_end. It might happen
 854         * that the cgroup limit was initially very low, resulting in a high
 855         * slice_end, but the limit was later bumped up and the bio dispatched
 856         * sooner; in that case slice_end needs to be reduced. A bogus, high
 857         * slice_end is bad because it prevents a new slice from starting.
 858         */
 859
 860        throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
 861
 862        time_elapsed = jiffies - tg->slice_start[rw];
 863
 864        nr_slices = time_elapsed / tg->td->throtl_slice;
 865
 866        if (!nr_slices)
 867                return;
 868        tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
 869        do_div(tmp, HZ);
 870        bytes_trim = tmp;
 871
 872        io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
 873                HZ;
 874
 875        if (!bytes_trim && !io_trim)
 876                return;
 877
 878        if (tg->bytes_disp[rw] >= bytes_trim)
 879                tg->bytes_disp[rw] -= bytes_trim;
 880        else
 881                tg->bytes_disp[rw] = 0;
 882
 883        if (tg->io_disp[rw] >= io_trim)
 884                tg->io_disp[rw] -= io_trim;
 885        else
 886                tg->io_disp[rw] = 0;
 887
 888        tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
 889
 890        throtl_log(&tg->service_queue,
 891                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
 892                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 893                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 894}
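
    /*
     * Example (illustrative): with HZ=1000, throtl_slice=HZ/10 and a 1 MB/s bps
     * limit, if 250 jiffies have passed since slice_start then nr_slices = 2 and
     * bytes_trim = 1048576 * 100 * 2 / 1000 = 209715, so a group that dispatched
     * 300000 bytes keeps about 90 KB charged and slice_start advances by 200
     * jiffies.
     */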
 895
 896static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 897                                  unsigned long *wait)
 898{
 899        bool rw = bio_data_dir(bio);
 900        unsigned int io_allowed;
 901        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 902        u64 tmp;
 903
 904        jiffy_elapsed = jiffies - tg->slice_start[rw];
 905
 906        /* Round up to the next throttle slice, wait time must be nonzero */
 907        jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
 908
 909        /*
 910         * jiffy_elapsed_rnd should not be a big value: the minimum iops is 1,
 911         * so jiffy_elapsed_rnd is at most the equivalent of 1 second; we allow
 912         * a dispatch after 1 second, and by then the slice should have
 913         * been trimmed.
 914         */
 915
 916        tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
 917        do_div(tmp, HZ);
 918
 919        if (tmp > UINT_MAX)
 920                io_allowed = UINT_MAX;
 921        else
 922                io_allowed = tmp;
 923
 924        if (tg->io_disp[rw] + 1 <= io_allowed) {
 925                if (wait)
 926                        *wait = 0;
 927                return true;
 928        }
 929
 930        /* Calc approx time to dispatch */
 931        jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
 932
 933        if (wait)
 934                *wait = jiffy_wait;
 935        return false;
 936}
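
    /*
     * Example (illustrative): with HZ=1000, throtl_slice=HZ/10 and a 100 iops
     * limit, at jiffy_elapsed=30 the window rounds up to 100 jiffies and allows
     * io_allowed = 100 * 100 / 1000 = 10 bios; if 10 bios have already been
     * dispatched, the next one waits jiffy_elapsed_rnd - jiffy_elapsed = 70
     * jiffies.
     */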
 937
 938static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 939                                 unsigned long *wait)
 940{
 941        bool rw = bio_data_dir(bio);
 942        u64 bytes_allowed, extra_bytes, tmp;
 943        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 944        unsigned int bio_size = throtl_bio_data_size(bio);
 945
 946        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 947
 948        /* Slice has just started. Consider one slice interval */
 949        if (!jiffy_elapsed)
 950                jiffy_elapsed_rnd = tg->td->throtl_slice;
 951
 952        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
 953
 954        tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
 955        do_div(tmp, HZ);
 956        bytes_allowed = tmp;
 957
 958        if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 959                if (wait)
 960                        *wait = 0;
 961                return true;
 962        }
 963
 964        /* Calc approx time to dispatch */
 965        extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 966        jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 967
 968        if (!jiffy_wait)
 969                jiffy_wait = 1;
 970
 971        /*
 972         * This wait time doesn't take the rounding up we did above into
 973         * account; add that time as well.
 974         */
 975        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 976        if (wait)
 977                *wait = jiffy_wait;
 978        return false;
 979}
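
    /*
     * Example (illustrative): with HZ=1000, throtl_slice=HZ/10 and a 1 MB/s
     * limit, a window rounded up to 100 jiffies allows about 104857 bytes; a bio
     * that would exceed that by roughly 11 KB waits about 10 jiffies plus the
     * rounding slack (jiffy_elapsed_rnd - jiffy_elapsed) added above.
     */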
 980
 981/*
 982 * Returns whether a bio can be dispatched. Also returns the approximate number
 983 * of jiffies to wait before this bio is within the IO rate and can be dispatched.
 984 */
 985static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 986                            unsigned long *wait)
 987{
 988        bool rw = bio_data_dir(bio);
 989        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 990
 991        /*
 992         * Currently the whole state machine of the group depends on the first
 993         * bio queued in the group's bio list, so this function should not be
 994         * called with a different bio if there are other bios
 995         * queued.
 996         */
 997        BUG_ON(tg->service_queue.nr_queued[rw] &&
 998               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 999
1000        /* If neither a bps nor an iops limit is set, bandwidth is unlimited */
1001        if (tg_bps_limit(tg, rw) == U64_MAX &&
1002            tg_iops_limit(tg, rw) == UINT_MAX) {
1003                if (wait)
1004                        *wait = 0;
1005                return true;
1006        }
1007
1008        /*
1009         * If previous slice expired, start a new one otherwise renew/extend
1010         * existing slice to make sure it is at least throtl_slice interval
1011         * long since now. New slice is started only for empty throttle group.
1012         * If there are queued bios, that means there should be an active
1013         * slice and it should be extended instead.
1014         */
1015        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1016                throtl_start_new_slice(tg, rw);
1017        else {
1018                if (time_before(tg->slice_end[rw],
1019                    jiffies + tg->td->throtl_slice))
1020                        throtl_extend_slice(tg, rw,
1021                                jiffies + tg->td->throtl_slice);
1022        }
1023
1024        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1025            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1026                if (wait)
1027                        *wait = 0;
1028                return true;
1029        }
1030
1031        max_wait = max(bps_wait, iops_wait);
1032
1033        if (wait)
1034                *wait = max_wait;
1035
1036        if (time_before(tg->slice_end[rw], jiffies + max_wait))
1037                throtl_extend_slice(tg, rw, jiffies + max_wait);
1038
1039        return false;
1040}
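
    /*
     * Callers either pass a NULL @wait just to test dispatchability (e.g.
     * throtl_dispatch_tg()) or use *wait to compute tg->disptime (see
     * tg_update_disptime()).  When false is returned, the slice has already
     * been extended, if necessary, to cover jiffies + max_wait.
     */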
1041
1042static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1043{
1044        bool rw = bio_data_dir(bio);
1045        unsigned int bio_size = throtl_bio_data_size(bio);
1046
1047        /* Charge the bio to the group */
1048        tg->bytes_disp[rw] += bio_size;
1049        tg->io_disp[rw]++;
1050        tg->last_bytes_disp[rw] += bio_size;
1051        tg->last_io_disp[rw]++;
1052
1053        /*
1054         * BIO_THROTTLED is used to prevent the same bio from being throttled
1055         * more than once, as a throttled bio will go through blk-throtl a
1056         * second time when it eventually gets issued.  Set it when a bio
1057         * is being charged to a tg.
1058         */
1059        if (!bio_flagged(bio, BIO_THROTTLED))
1060                bio_set_flag(bio, BIO_THROTTLED);
1061}
1062
1063/**
1064 * throtl_add_bio_tg - add a bio to the specified throtl_grp
1065 * @bio: bio to add
1066 * @qn: qnode to use
1067 * @tg: the target throtl_grp
1068 *
1069 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1070 * tg->qnode_on_self[] is used.
1071 */
1072static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1073                              struct throtl_grp *tg)
1074{
1075        struct throtl_service_queue *sq = &tg->service_queue;
1076        bool rw = bio_data_dir(bio);
1077
1078        if (!qn)
1079                qn = &tg->qnode_on_self[rw];
1080
1081        /*
1082         * If @tg doesn't currently have any bios queued in the same
1083         * direction, queueing @bio can change when @tg should be
1084         * dispatched.  Mark that @tg was empty.  This is automatically
1085         * cleared on the next tg_update_disptime().
1086         */
1087        if (!sq->nr_queued[rw])
1088                tg->flags |= THROTL_TG_WAS_EMPTY;
1089
1090        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1091
1092        sq->nr_queued[rw]++;
1093        throtl_enqueue_tg(tg);
1094}
1095
1096static void tg_update_disptime(struct throtl_grp *tg)
1097{
1098        struct throtl_service_queue *sq = &tg->service_queue;
1099        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1100        struct bio *bio;
1101
1102        bio = throtl_peek_queued(&sq->queued[READ]);
1103        if (bio)
1104                tg_may_dispatch(tg, bio, &read_wait);
1105
1106        bio = throtl_peek_queued(&sq->queued[WRITE]);
1107        if (bio)
1108                tg_may_dispatch(tg, bio, &write_wait);
1109
1110        min_wait = min(read_wait, write_wait);
1111        disptime = jiffies + min_wait;
1112
1113        /* Update dispatch time */
1114        throtl_dequeue_tg(tg);
1115        tg->disptime = disptime;
1116        throtl_enqueue_tg(tg);
1117
1118        /* see throtl_add_bio_tg() */
1119        tg->flags &= ~THROTL_TG_WAS_EMPTY;
1120}
1121
1122static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1123                                        struct throtl_grp *parent_tg, bool rw)
1124{
1125        if (throtl_slice_used(parent_tg, rw)) {
1126                throtl_start_new_slice_with_credit(parent_tg, rw,
1127                                child_tg->slice_start[rw]);
1128        }
1129
1130}
1131
1132static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1133{
1134        struct throtl_service_queue *sq = &tg->service_queue;
1135        struct throtl_service_queue *parent_sq = sq->parent_sq;
1136        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1137        struct throtl_grp *tg_to_put = NULL;
1138        struct bio *bio;
1139
1140        /*
1141         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1142         * from @tg may put its reference and @parent_sq might end up
1143         * getting released prematurely.  Remember the tg to put and put it
1144         * after @bio is transferred to @parent_sq.
1145         */
1146        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1147        sq->nr_queued[rw]--;
1148
1149        throtl_charge_bio(tg, bio);
1150
1151        /*
1152         * If our parent is another tg, we just need to transfer @bio to
1153         * the parent using throtl_add_bio_tg().  If our parent is
1154         * @td->service_queue, @bio is ready to be issued.  Put it on its
1155         * bio_lists[] and decrease total number queued.  The caller is
1156         * responsible for issuing these bios.
1157         */
1158        if (parent_tg) {
1159                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1160                start_parent_slice_with_credit(tg, parent_tg, rw);
1161        } else {
1162                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1163                                     &parent_sq->queued[rw]);
1164                BUG_ON(tg->td->nr_queued[rw] <= 0);
1165                tg->td->nr_queued[rw]--;
1166        }
1167
1168        throtl_trim_slice(tg, rw);
1169
1170        if (tg_to_put)
1171                blkg_put(tg_to_blkg(tg_to_put));
1172}
1173
1174static int throtl_dispatch_tg(struct throtl_grp *tg)
1175{
1176        struct throtl_service_queue *sq = &tg->service_queue;
1177        unsigned int nr_reads = 0, nr_writes = 0;
1178        unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
1179        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1180        struct bio *bio;
1181
1182        /* Try to dispatch 75% READS and 25% WRITES */
1183
1184        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1185               tg_may_dispatch(tg, bio, NULL)) {
1186
1187                tg_dispatch_one_bio(tg, bio_data_dir(bio));
1188                nr_reads++;
1189
1190                if (nr_reads >= max_nr_reads)
1191                        break;
1192        }
1193
1194        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1195               tg_may_dispatch(tg, bio, NULL)) {
1196
1197                tg_dispatch_one_bio(tg, bio_data_dir(bio));
1198                nr_writes++;
1199
1200                if (nr_writes >= max_nr_writes)
1201                        break;
1202        }
1203
1204        return nr_reads + nr_writes;
1205}
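
    /*
     * With the defaults above (throtl_grp_quantum = 8), max_nr_reads is 6 and
     * max_nr_writes is 2, so a single call moves at most 6 reads and 2 writes up
     * one level; throtl_select_dispatch() below ends a round once throtl_quantum
     * (32) bios have been dispatched in total.
     */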
1206
1207static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1208{
1209        unsigned int nr_disp = 0;
1210
1211        while (1) {
1212                struct throtl_grp *tg = throtl_rb_first(parent_sq);
1213                struct throtl_service_queue *sq;
1214
1215                if (!tg)
1216                        break;
1217
1218                if (time_before(jiffies, tg->disptime))
1219                        break;
1220
1221                throtl_dequeue_tg(tg);
1222
1223                nr_disp += throtl_dispatch_tg(tg);
1224
1225                sq = &tg->service_queue;
1226                if (sq->nr_queued[0] || sq->nr_queued[1])
1227                        tg_update_disptime(tg);
1228
1229                if (nr_disp >= throtl_quantum)
1230                        break;
1231        }
1232
1233        return nr_disp;
1234}
1235
1236static bool throtl_can_upgrade(struct throtl_data *td,
1237        struct throtl_grp *this_tg);
1238/**
1239 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1240 * @t: the pending_timer member of the throtl_service_queue being serviced
1241 *
1242 * This timer is armed when a child throtl_grp with active bios becomes
1243 * pending and queued on the service_queue's pending_tree and expires when
1244 * the first child throtl_grp should be dispatched.  This function
1245 * dispatches bios from the children throtl_grps to the parent
1246 * service_queue.
1247 *
1248 * If the parent's parent is another throtl_grp, dispatching is propagated
1249 * by either arming its pending_timer or repeating dispatch directly.  If
1250 * the top-level service_tree is reached, throtl_data->dispatch_work is
1251 * kicked so that the ready bios are issued.
1252 */
1253static void throtl_pending_timer_fn(struct timer_list *t)
1254{
1255        struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1256        struct throtl_grp *tg = sq_to_tg(sq);
1257        struct throtl_data *td = sq_to_td(sq);
1258        struct request_queue *q = td->queue;
1259        struct throtl_service_queue *parent_sq;
1260        bool dispatched;
1261        int ret;
1262
1263        spin_lock_irq(&q->queue_lock);
1264        if (throtl_can_upgrade(td, NULL))
1265                throtl_upgrade_state(td);
1266
1267again:
1268        parent_sq = sq->parent_sq;
1269        dispatched = false;
1270
1271        while (true) {
1272                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1273                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
1274                           sq->nr_queued[READ], sq->nr_queued[WRITE]);
1275
1276                ret = throtl_select_dispatch(sq);
1277                if (ret) {
1278                        throtl_log(sq, "bios disp=%u", ret);
1279                        dispatched = true;
1280                }
1281
1282                if (throtl_schedule_next_dispatch(sq, false))
1283                        break;
1284
1285                /* this dispatch window is still open, relax and repeat */
1286                spin_unlock_irq(&q->queue_lock);
1287                cpu_relax();
1288                spin_lock_irq(&q->queue_lock);
1289        }
1290
1291        if (!dispatched)
1292                goto out_unlock;
1293
1294        if (parent_sq) {
1295                /* @parent_sq is another throtl_grp, propagate dispatch */
1296                if (tg->flags & THROTL_TG_WAS_EMPTY) {
1297                        tg_update_disptime(tg);
1298                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1299                                /* window is already open, repeat dispatching */
1300                                sq = parent_sq;
1301                                tg = sq_to_tg(sq);
1302                                goto again;
1303                        }
1304                }
1305        } else {
1306                /* reached the top-level, queue issuing */
1307                queue_work(kthrotld_workqueue, &td->dispatch_work);
1308        }
1309out_unlock:
1310        spin_unlock_irq(&q->queue_lock);
1311}
1312
1313/**
1314 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1315 * @work: work item being executed
1316 *
1317 * This function is queued for execution when bios reach the bio_lists[]
1318 * of throtl_data->service_queue.  Those bios are ready and are issued by this
1319 * function.
1320 */
1321static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1322{
1323        struct throtl_data *td = container_of(work, struct throtl_data,
1324                                              dispatch_work);
1325        struct throtl_service_queue *td_sq = &td->service_queue;
1326        struct request_queue *q = td->queue;
1327        struct bio_list bio_list_on_stack;
1328        struct bio *bio;
1329        struct blk_plug plug;
1330        int rw;
1331
1332        bio_list_init(&bio_list_on_stack);
1333
1334        spin_lock_irq(&q->queue_lock);
1335        for (rw = READ; rw <= WRITE; rw++)
1336                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1337                        bio_list_add(&bio_list_on_stack, bio);
1338        spin_unlock_irq(&q->queue_lock);
1339
1340        if (!bio_list_empty(&bio_list_on_stack)) {
1341                blk_start_plug(&plug);
1342                while ((bio = bio_list_pop(&bio_list_on_stack)))
1343                        generic_make_request(bio);
1344                blk_finish_plug(&plug);
1345        }
1346}
1347
1348static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1349                              int off)
1350{
1351        struct throtl_grp *tg = pd_to_tg(pd);
1352        u64 v = *(u64 *)((void *)tg + off);
1353
1354        if (v == U64_MAX)
1355                return 0;
1356        return __blkg_prfill_u64(sf, pd, v);
1357}
1358
1359static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1360                               int off)
1361{
1362        struct throtl_grp *tg = pd_to_tg(pd);
1363        unsigned int v = *(unsigned int *)((void *)tg + off);
1364
1365        if (v == UINT_MAX)
1366                return 0;
1367        return __blkg_prfill_u64(sf, pd, v);
1368}
1369
1370static int tg_print_conf_u64(struct seq_file *sf, void *v)
1371{
1372        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1373                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1374        return 0;
1375}
1376
1377static int tg_print_conf_uint(struct seq_file *sf, void *v)
1378{
1379        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1380                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1381        return 0;
1382}
1383
1384static void tg_conf_updated(struct throtl_grp *tg, bool global)
1385{
1386        struct throtl_service_queue *sq = &tg->service_queue;
1387        struct cgroup_subsys_state *pos_css;
1388        struct blkcg_gq *blkg;
1389
1390        throtl_log(&tg->service_queue,
1391                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1392                   tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1393                   tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1394
1395        /*
1396         * Update has_rules[] flags for the updated tg's subtree.  A tg is
1397         * considered to have rules if either the tg itself or any of its
1398         * ancestors has rules.  This identifies groups without any
1399         * restrictions in the whole hierarchy and allows them to bypass
1400         * blk-throttle.
1401         */
1402        blkg_for_each_descendant_pre(blkg, pos_css,
1403                        global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1404                struct throtl_grp *this_tg = blkg_to_tg(blkg);
1405                struct throtl_grp *parent_tg;
1406
1407                tg_update_has_rules(this_tg);
1408                /* ignore root/second level */
1409                if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1410                    !blkg->parent->parent)
1411                        continue;
1412                parent_tg = blkg_to_tg(blkg->parent);
1413                /*
1414                 * make sure all children have a lower idle time threshold and
1415                 * a higher latency target
1416                 */
1417                this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1418                                parent_tg->idletime_threshold);
1419                this_tg->latency_target = max(this_tg->latency_target,
1420                                parent_tg->latency_target);
1421        }
1422
1423        /*
1424         * We're already holding queue_lock and know @tg is valid.  Let's
1425         * apply the new config directly.
1426         *
1427         * Restart the slices for both READ and WRITE. It might happen
1428         * that a group's limits are dropped suddenly and we don't want to
1429         * account recently dispatched IO against the new, lower rate.
1430         */
1431        throtl_start_new_slice(tg, 0);
1432        throtl_start_new_slice(tg, 1);
1433
1434        if (tg->flags & THROTL_TG_PENDING) {
1435                tg_update_disptime(tg);
1436                throtl_schedule_next_dispatch(sq->parent_sq, true);
1437        }
1438}
1439
1440static ssize_t tg_set_conf(struct kernfs_open_file *of,
1441                           char *buf, size_t nbytes, loff_t off, bool is_u64)
1442{
1443        struct blkcg *blkcg = css_to_blkcg(of_css(of));
1444        struct blkg_conf_ctx ctx;
1445        struct throtl_grp *tg;
1446        int ret;
1447        u64 v;
1448
1449        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1450        if (ret)
1451                return ret;
1452
1453        ret = -EINVAL;
1454        if (sscanf(ctx.body, "%llu", &v) != 1)
1455                goto out_finish;
1456        if (!v)
1457                v = U64_MAX;
1458
1459        tg = blkg_to_tg(ctx.blkg);
1460
1461        if (is_u64)
1462                *(u64 *)((void *)tg + of_cft(of)->private) = v;
1463        else
1464                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1465
1466        tg_conf_updated(tg, false);
1467        ret = 0;
1468out_finish:
1469        blkg_conf_finish(&ctx);
1470        return ret ?: nbytes;
1471}
1472
1473static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1474                               char *buf, size_t nbytes, loff_t off)
1475{
1476        return tg_set_conf(of, buf, nbytes, off, true);
1477}
1478
1479static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1480                                char *buf, size_t nbytes, loff_t off)
1481{
1482        return tg_set_conf(of, buf, nbytes, off, false);
1483}
1484
1485static int tg_print_rwstat(struct seq_file *sf, void *v)
1486{
1487        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1488                          blkg_prfill_rwstat, &blkcg_policy_throtl,
1489                          seq_cft(sf)->private, true);
1490        return 0;
1491}
1492
1493static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1494                                      struct blkg_policy_data *pd, int off)
1495{
1496        struct blkg_rwstat_sample sum;
1497
1498        blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1499                                  &sum);
1500        return __blkg_prfill_rwstat(sf, pd, &sum);
1501}
1502
1503static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1504{
1505        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1506                          tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1507                          seq_cft(sf)->private, true);
1508        return 0;
1509}
1510
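    /*
     * Legacy (cgroup v1) interface files.  Illustrative usage, assuming a
     * hypothetical device 8:16 and the usual blkio mount point: limit the
     * group's reads to 1MB/s with
     *
     *   echo "8:16 1048576" > /sys/fs/cgroup/blkio/<group>/blkio.throttle.read_bps_device
     */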
1511static struct cftype throtl_legacy_files[] = {
1512        {
1513                .name = "throttle.read_bps_device",
1514                .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1515                .seq_show = tg_print_conf_u64,
1516                .write = tg_set_conf_u64,
1517        },
1518        {
1519                .name = "throttle.write_bps_device",
1520                .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1521                .seq_show = tg_print_conf_u64,
1522                .write = tg_set_conf_u64,
1523        },
1524        {
1525                .name = "throttle.read_iops_device",
1526                .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1527                .seq_show = tg_print_conf_uint,
1528                .write = tg_set_conf_uint,
1529        },
1530        {
1531                .name = "throttle.write_iops_device",
1532                .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1533                .seq_show = tg_print_conf_uint,
1534                .write = tg_set_conf_uint,
1535        },
1536        {
1537                .name = "throttle.io_service_bytes",
1538                .private = offsetof(struct throtl_grp, stat_bytes),
1539                .seq_show = tg_print_rwstat,
1540        },
1541        {
1542                .name = "throttle.io_service_bytes_recursive",
1543                .private = offsetof(struct throtl_grp, stat_bytes),
1544                .seq_show = tg_print_rwstat_recursive,
1545        },
1546        {
1547                .name = "throttle.io_serviced",
1548                .private = offsetof(struct throtl_grp, stat_ios),
1549                .seq_show = tg_print_rwstat,
1550        },
1551        {
1552                .name = "throttle.io_serviced_recursive",
1553                .private = offsetof(struct throtl_grp, stat_ios),
1554                .seq_show = tg_print_rwstat_recursive,
1555        },
1556        { }     /* terminate */
1557};
1558
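    /*
     * Print one group's "low"/"max" line.  Groups still at their defaults
     * are skipped entirely; otherwise the output looks like (hypothetical
     * device 8:16)
     *
     *   8:16 rbps=2097152 wbps=max riops=max wiops=120
     *
     * with " idle=" and " latency=" appended for the low limit.
     */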
1559static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1560                         int off)
1561{
1562        struct throtl_grp *tg = pd_to_tg(pd);
1563        const char *dname = blkg_dev_name(pd->blkg);
1564        char bufs[4][21] = { "max", "max", "max", "max" };
1565        u64 bps_dft;
1566        unsigned int iops_dft;
1567        char idle_time[26] = "";
1568        char latency_time[26] = "";
1569
1570        if (!dname)
1571                return 0;
1572
1573        if (off == LIMIT_LOW) {
1574                bps_dft = 0;
1575                iops_dft = 0;
1576        } else {
1577                bps_dft = U64_MAX;
1578                iops_dft = UINT_MAX;
1579        }
1580
1581        if (tg->bps_conf[READ][off] == bps_dft &&
1582            tg->bps_conf[WRITE][off] == bps_dft &&
1583            tg->iops_conf[READ][off] == iops_dft &&
1584            tg->iops_conf[WRITE][off] == iops_dft &&
1585            (off != LIMIT_LOW ||
1586             (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1587              tg->latency_target_conf == DFL_LATENCY_TARGET)))
1588                return 0;
1589
1590        if (tg->bps_conf[READ][off] != U64_MAX)
1591                snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1592                        tg->bps_conf[READ][off]);
1593        if (tg->bps_conf[WRITE][off] != U64_MAX)
1594                snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1595                        tg->bps_conf[WRITE][off]);
1596        if (tg->iops_conf[READ][off] != UINT_MAX)
1597                snprintf(bufs[2], sizeof(bufs[2]), "%u",
1598                        tg->iops_conf[READ][off]);
1599        if (tg->iops_conf[WRITE][off] != UINT_MAX)
1600                snprintf(bufs[3], sizeof(bufs[3]), "%u",
1601                        tg->iops_conf[WRITE][off]);
1602        if (off == LIMIT_LOW) {
1603                if (tg->idletime_threshold_conf == ULONG_MAX)
1604                        strcpy(idle_time, " idle=max");
1605                else
1606                        snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1607                                tg->idletime_threshold_conf);
1608
1609                if (tg->latency_target_conf == ULONG_MAX)
1610                        strcpy(latency_time, " latency=max");
1611                else
1612                        snprintf(latency_time, sizeof(latency_time),
1613                                " latency=%lu", tg->latency_target_conf);
1614        }
1615
1616        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1617                   dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1618                   latency_time);
1619        return 0;
1620}
1621
1622static int tg_print_limit(struct seq_file *sf, void *v)
1623{
1624        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1625                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1626        return 0;
1627}
1628
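    /*
     * Parse a "low"/"max" write of the form
     *
     *   MAJ:MIN [rbps=V] [wbps=V] [riops=V] [wiops=V] [idle=V] [latency=V]
     *
     * where each V is a number or "max" (a value of 0 is rejected with
     * -ERANGE); the idle/latency keys configure the low-limit heuristics.
     * Illustrative example, assuming a hypothetical device 8:16:
     *
     *   echo "8:16 wbps=2097152 riops=120" > io.max
     */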
1629static ssize_t tg_set_limit(struct kernfs_open_file *of,
1630                          char *buf, size_t nbytes, loff_t off)
1631{
1632        struct blkcg *blkcg = css_to_blkcg(of_css(of));
1633        struct blkg_conf_ctx ctx;
1634        struct throtl_grp *tg;
1635        u64 v[4];
1636        unsigned long idle_time;
1637        unsigned long latency_time;
1638        int ret;
1639        int index = of_cft(of)->private;
1640
1641        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1642        if (ret)
1643                return ret;
1644
1645        tg = blkg_to_tg(ctx.blkg);
1646
1647        v[0] = tg->bps_conf[READ][index];
1648        v[1] = tg->bps_conf[WRITE][index];
1649        v[2] = tg->iops_conf[READ][index];
1650        v[3] = tg->iops_conf[WRITE][index];
1651
1652        idle_time = tg->idletime_threshold_conf;
1653        latency_time = tg->latency_target_conf;
1654        while (true) {
1655                char tok[27];   /* wiops=18446744073709551615 (U64_MAX) */
1656                char *p;
1657                u64 val = U64_MAX;
1658                int len;
1659
1660                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1661                        break;
1662                if (tok[0] == '\0')
1663                        break;
1664                ctx.body += len;
1665
1666                ret = -EINVAL;
1667                p = tok;
1668                strsep(&p, "=");
1669                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1670                        goto out_finish;
1671
1672                ret = -ERANGE;
1673                if (!val)
1674                        goto out_finish;
1675
1676                ret = -EINVAL;
1677                if (!strcmp(tok, "rbps"))
1678                        v[0] = val;
1679                else if (!strcmp(tok, "wbps"))
1680                        v[1] = val;
1681                else if (!strcmp(tok, "riops"))
1682                        v[2] = min_t(u64, val, UINT_MAX);
1683                else if (!strcmp(tok, "wiops"))
1684                        v[3] = min_t(u64, val, UINT_MAX);
1685                else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1686                        idle_time = val;
1687                else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1688                        latency_time = val;
1689                else
1690                        goto out_finish;
1691        }
1692
1693        tg->bps_conf[READ][index] = v[0];
1694        tg->bps_conf[WRITE][index] = v[1];
1695        tg->iops_conf[READ][index] = v[2];
1696        tg->iops_conf[WRITE][index] = v[3];
1697
1698        if (index == LIMIT_MAX) {
1699                tg->bps[READ][index] = v[0];
1700                tg->bps[WRITE][index] = v[1];
1701                tg->iops[READ][index] = v[2];
1702                tg->iops[WRITE][index] = v[3];
1703        }
1704        tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1705                tg->bps_conf[READ][LIMIT_MAX]);
1706        tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1707                tg->bps_conf[WRITE][LIMIT_MAX]);
1708        tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1709                tg->iops_conf[READ][LIMIT_MAX]);
1710        tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1711                tg->iops_conf[WRITE][LIMIT_MAX]);
1712        tg->idletime_threshold_conf = idle_time;
1713        tg->latency_target_conf = latency_time;
1714
1715        /* force the user to configure all settings for the low limit */
1716        if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1717              tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1718            tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1719            tg->latency_target_conf == DFL_LATENCY_TARGET) {
1720                tg->bps[READ][LIMIT_LOW] = 0;
1721                tg->bps[WRITE][LIMIT_LOW] = 0;
1722                tg->iops[READ][LIMIT_LOW] = 0;
1723                tg->iops[WRITE][LIMIT_LOW] = 0;
1724                tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1725                tg->latency_target = DFL_LATENCY_TARGET;
1726        } else if (index == LIMIT_LOW) {
1727                tg->idletime_threshold = tg->idletime_threshold_conf;
1728                tg->latency_target = tg->latency_target_conf;
1729        }
1730
1731        blk_throtl_update_limit_valid(tg->td);
1732        if (tg->td->limit_valid[LIMIT_LOW]) {
1733                if (index == LIMIT_LOW)
1734                        tg->td->limit_index = LIMIT_LOW;
1735        } else
1736                tg->td->limit_index = LIMIT_MAX;
1737        tg_conf_updated(tg, index == LIMIT_LOW &&
1738                tg->td->limit_valid[LIMIT_LOW]);
1739        ret = 0;
1740out_finish:
1741        blkg_conf_finish(&ctx);
1742        return ret ?: nbytes;
1743}
1744
1745static struct cftype throtl_files[] = {
1746#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1747        {
1748                .name = "low",
1749                .flags = CFTYPE_NOT_ON_ROOT,
1750                .seq_show = tg_print_limit,
1751                .write = tg_set_limit,
1752                .private = LIMIT_LOW,
1753        },
1754#endif
1755        {
1756                .name = "max",
1757                .flags = CFTYPE_NOT_ON_ROOT,
1758                .seq_show = tg_print_limit,
1759                .write = tg_set_limit,
1760                .private = LIMIT_MAX,
1761        },
1762        { }     /* terminate */
1763};
1764
1765static void throtl_shutdown_wq(struct request_queue *q)
1766{
1767        struct throtl_data *td = q->td;
1768
1769        cancel_work_sync(&td->dispatch_work);
1770}
1771
1772static struct blkcg_policy blkcg_policy_throtl = {
1773        .dfl_cftypes            = throtl_files,
1774        .legacy_cftypes         = throtl_legacy_files,
1775
1776        .pd_alloc_fn            = throtl_pd_alloc,
1777        .pd_init_fn             = throtl_pd_init,
1778        .pd_online_fn           = throtl_pd_online,
1779        .pd_offline_fn          = throtl_pd_offline,
1780        .pd_free_fn             = throtl_pd_free,
1781};
1782
1783static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1784{
1785        unsigned long rtime = jiffies, wtime = jiffies;
1786
1787        if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1788                rtime = tg->last_low_overflow_time[READ];
1789        if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1790                wtime = tg->last_low_overflow_time[WRITE];
1791        return min(rtime, wtime);
1792}
1793
1794/* tg should not be an intermediate node */
1795static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1796{
1797        struct throtl_service_queue *parent_sq;
1798        struct throtl_grp *parent = tg;
1799        unsigned long ret = __tg_last_low_overflow_time(tg);
1800
1801        while (true) {
1802                parent_sq = parent->service_queue.parent_sq;
1803                parent = sq_to_tg(parent_sq);
1804                if (!parent)
1805                        break;
1806
1807                /*
1808                 * The parent doesn't have a low limit, so it always reaches
1809                 * its low limit. Its overflow time is useless for children.
1810                 */
1811                if (!parent->bps[READ][LIMIT_LOW] &&
1812                    !parent->iops[READ][LIMIT_LOW] &&
1813                    !parent->bps[WRITE][LIMIT_LOW] &&
1814                    !parent->iops[WRITE][LIMIT_LOW])
1815                        continue;
1816                if (time_after(__tg_last_low_overflow_time(parent), ret))
1817                        ret = __tg_last_low_overflow_time(parent);
1818        }
1819        return ret;
1820}
1821
1822static bool throtl_tg_is_idle(struct throtl_grp *tg)
1823{
1824        /*
1825         * cgroup is idle if:
1826         * - the latest idle period is too long: longer than a fixed cap (in
1827         *   case the user set too large a threshold) or 4x the idle threshold
1828         * - the average think time is greater than the idle time threshold
1829         * - IO latency is mostly below the target (fewer than 1 in 5 bad bios)
1830         */
1831        unsigned long time;
1832        bool ret;
1833
1834        time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1835        ret = tg->latency_target == DFL_LATENCY_TARGET ||
1836              tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1837              (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1838              tg->avg_idletime > tg->idletime_threshold ||
1839              (tg->latency_target && tg->bio_cnt &&
1840                tg->bad_bio_cnt * 5 < tg->bio_cnt);
1841        throtl_log(&tg->service_queue,
1842                "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1843                tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1844                tg->bio_cnt, ret, tg->td->scale);
1845        return ret;
1846}
1847
1848static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1849{
1850        struct throtl_service_queue *sq = &tg->service_queue;
1851        bool read_limit, write_limit;
1852
1853        /*
1854         * If the cgroup reaches its low limit (a low limit of 0 always
1855         * counts as reached), it's OK to upgrade to the next limit.
1856         */
1857        read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1858        write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1859        if (!read_limit && !write_limit)
1860                return true;
1861        if (read_limit && sq->nr_queued[READ] &&
1862            (!write_limit || sq->nr_queued[WRITE]))
1863                return true;
1864        if (write_limit && sq->nr_queued[WRITE] &&
1865            (!read_limit || sq->nr_queued[READ]))
1866                return true;
1867
1868        if (time_after_eq(jiffies,
1869                tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1870            throtl_tg_is_idle(tg))
1871                return true;
1872        return false;
1873}
1874
1875static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1876{
1877        while (true) {
1878                if (throtl_tg_can_upgrade(tg))
1879                        return true;
1880                tg = sq_to_tg(tg->service_queue.parent_sq);
1881                if (!tg || !tg_to_blkg(tg)->parent)
1882                        return false;
1883        }
1884        return false;
1885}
1886
1887static bool throtl_can_upgrade(struct throtl_data *td,
1888        struct throtl_grp *this_tg)
1889{
1890        struct cgroup_subsys_state *pos_css;
1891        struct blkcg_gq *blkg;
1892
1893        if (td->limit_index != LIMIT_LOW)
1894                return false;
1895
1896        if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1897                return false;
1898
1899        rcu_read_lock();
1900        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1901                struct throtl_grp *tg = blkg_to_tg(blkg);
1902
1903                if (tg == this_tg)
1904                        continue;
1905                if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1906                        continue;
1907                if (!throtl_hierarchy_can_upgrade(tg)) {
1908                        rcu_read_unlock();
1909                        return false;
1910                }
1911        }
1912        rcu_read_unlock();
1913        return true;
1914}
1915
1916static void throtl_upgrade_check(struct throtl_grp *tg)
1917{
1918        unsigned long now = jiffies;
1919
1920        if (tg->td->limit_index != LIMIT_LOW)
1921                return;
1922
1923        if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1924                return;
1925
1926        tg->last_check_time = now;
1927
1928        if (!time_after_eq(now,
1929             __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1930                return;
1931
1932        if (throtl_can_upgrade(tg->td, NULL))
1933                throtl_upgrade_state(tg->td);
1934}
1935
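    /*
     * Switch the whole hierarchy to LIMIT_MAX: reset the scale, then walk
     * every group and kick its dispatch so bios already queued under the
     * low limits are re-evaluated against the max limits right away.
     */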
1936static void throtl_upgrade_state(struct throtl_data *td)
1937{
1938        struct cgroup_subsys_state *pos_css;
1939        struct blkcg_gq *blkg;
1940
1941        throtl_log(&td->service_queue, "upgrade to max");
1942        td->limit_index = LIMIT_MAX;
1943        td->low_upgrade_time = jiffies;
1944        td->scale = 0;
1945        rcu_read_lock();
1946        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1947                struct throtl_grp *tg = blkg_to_tg(blkg);
1948                struct throtl_service_queue *sq = &tg->service_queue;
1949
1950                tg->disptime = jiffies - 1;
1951                throtl_select_dispatch(sq);
1952                throtl_schedule_next_dispatch(sq, true);
1953        }
1954        rcu_read_unlock();
1955        throtl_select_dispatch(&td->service_queue);
1956        throtl_schedule_next_dispatch(&td->service_queue, true);
1957        queue_work(kthrotld_workqueue, &td->dispatch_work);
1958}
1959
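    /*
     * Back off gradually rather than dropping straight to the low limit:
     * each call halves td->scale and, while it is still non-zero, only
     * rewinds low_upgrade_time by that many throttle slices; the limit
     * index switches to @new once the scale has decayed to zero.
     */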
1960static void throtl_downgrade_state(struct throtl_data *td, int new)
1961{
1962        td->scale /= 2;
1963
1964        throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1965        if (td->scale) {
1966                td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1967                return;
1968        }
1969
1970        td->limit_index = new;
1971        td->low_downgrade_time = jiffies;
1972}
1973
1974static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1975{
1976        struct throtl_data *td = tg->td;
1977        unsigned long now = jiffies;
1978
1979        /*
1980         * If cgroup is below low limit, consider downgrade and throttle other
1981         * cgroups
1982         */
1983        if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1984            time_after_eq(now, tg_last_low_overflow_time(tg) +
1985                                        td->throtl_slice) &&
1986            (!throtl_tg_is_idle(tg) ||
1987             !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1988                return true;
1989        return false;
1990}
1991
1992static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1993{
1994        while (true) {
1995                if (!throtl_tg_can_downgrade(tg))
1996                        return false;
1997                tg = sq_to_tg(tg->service_queue.parent_sq);
1998                if (!tg || !tg_to_blkg(tg)->parent)
1999                        break;
2000        }
2001        return true;
2002}
2003
2004static void throtl_downgrade_check(struct throtl_grp *tg)
2005{
2006        uint64_t bps;
2007        unsigned int iops;
2008        unsigned long elapsed_time;
2009        unsigned long now = jiffies;
2010
2011        if (tg->td->limit_index != LIMIT_MAX ||
2012            !tg->td->limit_valid[LIMIT_LOW])
2013                return;
2014        if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
2015                return;
2016        if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
2017                return;
2018
2019        elapsed_time = now - tg->last_check_time;
2020        tg->last_check_time = now;
2021
2022        if (time_before(now, tg_last_low_overflow_time(tg) +
2023                        tg->td->throtl_slice))
2024                return;
2025
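            /*
             * Convert the bytes/ios dispatched since the last check into
             * rates.  For example, 1MiB dispatched over 250ms (HZ/4 jiffies)
             * works out to 1048576 * HZ / (HZ / 4) = 4MiB/s.
             */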
2026        if (tg->bps[READ][LIMIT_LOW]) {
2027                bps = tg->last_bytes_disp[READ] * HZ;
2028                do_div(bps, elapsed_time);
2029                if (bps >= tg->bps[READ][LIMIT_LOW])
2030                        tg->last_low_overflow_time[READ] = now;
2031        }
2032
2033        if (tg->bps[WRITE][LIMIT_LOW]) {
2034                bps = tg->last_bytes_disp[WRITE] * HZ;
2035                do_div(bps, elapsed_time);
2036                if (bps >= tg->bps[WRITE][LIMIT_LOW])
2037                        tg->last_low_overflow_time[WRITE] = now;
2038        }
2039
2040        if (tg->iops[READ][LIMIT_LOW]) {
2041                iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2042                if (iops >= tg->iops[READ][LIMIT_LOW])
2043                        tg->last_low_overflow_time[READ] = now;
2044        }
2045
2046        if (tg->iops[WRITE][LIMIT_LOW]) {
2047                iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2048                if (iops >= tg->iops[WRITE][LIMIT_LOW])
2049                        tg->last_low_overflow_time[WRITE] = now;
2050        }
2051
2052        /*
2053         * If cgroup is below low limit, consider downgrade and throttle other
2054         * cgroups
2055         */
2056        if (throtl_hierarchy_can_downgrade(tg))
2057                throtl_downgrade_state(tg->td, LIMIT_LOW);
2058
2059        tg->last_bytes_disp[READ] = 0;
2060        tg->last_bytes_disp[WRITE] = 0;
2061        tg->last_io_disp[READ] = 0;
2062        tg->last_io_disp[WRITE] = 0;
2063}
2064
2065static void blk_throtl_update_idletime(struct throtl_grp *tg)
2066{
2067        unsigned long now = ktime_get_ns() >> 10;
2068        unsigned long last_finish_time = tg->last_finish_time;
2069
2070        if (now <= last_finish_time || last_finish_time == 0 ||
2071            last_finish_time == tg->checked_last_finish_time)
2072                return;
2073
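            /*
             * Exponentially weighted moving average of the idle time between
             * the last IO completion and this submission, in usecs:
             * avg = (7 * avg + new_sample) / 8.
             */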
2074        tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2075        tg->checked_last_finish_time = last_finish_time;
2076}
2077
2078#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2079static void throtl_update_latency_buckets(struct throtl_data *td)
2080{
2081        struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2082        int i, cpu, rw;
2083        unsigned long last_latency[2] = { 0 };
2084        unsigned long latency[2];
2085
2086        if (!blk_queue_nonrot(td->queue))
2087                return;
2088        if (time_before(jiffies, td->last_calculate_time + HZ))
2089                return;
2090        td->last_calculate_time = jiffies;
2091
2092        memset(avg_latency, 0, sizeof(avg_latency));
2093        for (rw = READ; rw <= WRITE; rw++) {
2094                for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2095                        struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2096
2097                        for_each_possible_cpu(cpu) {
2098                                struct latency_bucket *bucket;
2099
2100                                /* this isn't race free, but ok in practice */
2101                                bucket = per_cpu_ptr(td->latency_buckets[rw],
2102                                        cpu);
2103                                tmp->total_latency += bucket[i].total_latency;
2104                                tmp->samples += bucket[i].samples;
2105                                bucket[i].total_latency = 0;
2106                                bucket[i].samples = 0;
2107                        }
2108
2109                        if (tmp->samples >= 32) {
2110                                int samples = tmp->samples;
2111
2112                                latency[rw] = tmp->total_latency;
2113
2114                                tmp->total_latency = 0;
2115                                tmp->samples = 0;
2116                                latency[rw] /= samples;
2117                                if (latency[rw] == 0)
2118                                        continue;
2119                                avg_latency[rw][i].latency = latency[rw];
2120                        }
2121                }
2122        }
2123
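            /*
             * Fold the fresh per-bucket averages into the running averages
             * (weight 7/8 old, 1/8 new) and clamp each bucket so the average
             * latency never decreases as the IO size bucket grows.
             */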
2124        for (rw = READ; rw <= WRITE; rw++) {
2125                for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2126                        if (!avg_latency[rw][i].latency) {
2127                                if (td->avg_buckets[rw][i].latency < last_latency[rw])
2128                                        td->avg_buckets[rw][i].latency =
2129                                                last_latency[rw];
2130                                continue;
2131                        }
2132
2133                        if (!td->avg_buckets[rw][i].valid)
2134                                latency[rw] = avg_latency[rw][i].latency;
2135                        else
2136                                latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2137                                        avg_latency[rw][i].latency) >> 3;
2138
2139                        td->avg_buckets[rw][i].latency = max(latency[rw],
2140                                last_latency[rw]);
2141                        td->avg_buckets[rw][i].valid = true;
2142                        last_latency[rw] = td->avg_buckets[rw][i].latency;
2143                }
2144        }
2145
2146        for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2147                throtl_log(&td->service_queue,
2148                        "Latency bucket %d: read latency=%ld, read valid=%d, "
2149                        "write latency=%ld, write valid=%d", i,
2150                        td->avg_buckets[READ][i].latency,
2151                        td->avg_buckets[READ][i].valid,
2152                        td->avg_buckets[WRITE][i].latency,
2153                        td->avg_buckets[WRITE][i].valid);
2154}
2155#else
2156static inline void throtl_update_latency_buckets(struct throtl_data *td)
2157{
2158}
2159#endif
2160
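    /*
     * Main throttling hook.  The bio is charged level by level, climbing
     * from its own group towards the root; it is queued at the first level
     * that is over its limits and dispatched directly only if every level
     * lets it through.  Returns true if the bio was throttled (queued).
     */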
2161bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2162                    struct bio *bio)
2163{
2164        struct throtl_qnode *qn = NULL;
2165        struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2166        struct throtl_service_queue *sq;
2167        bool rw = bio_data_dir(bio);
2168        bool throttled = false;
2169        struct throtl_data *td = tg->td;
2170
2171        WARN_ON_ONCE(!rcu_read_lock_held());
2172
2173        /* see throtl_charge_bio() */
2174        if (bio_flagged(bio, BIO_THROTTLED))
2175                goto out;
2176
2177        if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2178                blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2179                                bio->bi_iter.bi_size);
2180                blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2181        }
2182
2183        if (!tg->has_rules[rw])
2184                goto out;
2185
2186        spin_lock_irq(&q->queue_lock);
2187
2188        throtl_update_latency_buckets(td);
2189
2190        blk_throtl_update_idletime(tg);
2191
2192        sq = &tg->service_queue;
2193
2194again:
2195        while (true) {
2196                if (tg->last_low_overflow_time[rw] == 0)
2197                        tg->last_low_overflow_time[rw] = jiffies;
2198                throtl_downgrade_check(tg);
2199                throtl_upgrade_check(tg);
2200                /* throtl is FIFO - if bios are already queued, this one must queue too */
2201                if (sq->nr_queued[rw])
2202                        break;
2203
2204                /* if above limits, break to queue */
2205                if (!tg_may_dispatch(tg, bio, NULL)) {
2206                        tg->last_low_overflow_time[rw] = jiffies;
2207                        if (throtl_can_upgrade(td, tg)) {
2208                                throtl_upgrade_state(td);
2209                                goto again;
2210                        }
2211                        break;
2212                }
2213
2214                /* within limits, let's charge and dispatch directly */
2215                throtl_charge_bio(tg, bio);
2216
2217                /*
2218                 * We need to trim the slice even when bios are not being
2219                 * queued, otherwise a bio might go unqueued for a long time
2220                 * while the slice keeps extending and trim is never called.
2221                 * If the limits were then reduced suddenly, all the IO
2222                 * dispatched so far would be accounted at the new low rate
2223                 * and newly queued IO would get a really long dispatch
2224                 * time.
2225                 *
2226                 * So keep on trimming slice even if bio is not queued.
2227                 */
2228                throtl_trim_slice(tg, rw);
2229
2230                /*
2231                 * @bio passed through this layer without being throttled.
2232                 * Climb up the ladder.  If we're already at the top, it
2233                 * can be executed directly.
2234                 */
2235                qn = &tg->qnode_on_parent[rw];
2236                sq = sq->parent_sq;
2237                tg = sq_to_tg(sq);
2238                if (!tg)
2239                        goto out_unlock;
2240        }
2241
2242        /* out-of-limit, queue to @tg */
2243        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2244                   rw == READ ? 'R' : 'W',
2245                   tg->bytes_disp[rw], bio->bi_iter.bi_size,
2246                   tg_bps_limit(tg, rw),
2247                   tg->io_disp[rw], tg_iops_limit(tg, rw),
2248                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
2249
2250        tg->last_low_overflow_time[rw] = jiffies;
2251
2252        td->nr_queued[rw]++;
2253        throtl_add_bio_tg(bio, qn, tg);
2254        throttled = true;
2255
2256        /*
2257         * Update @tg's dispatch time and force schedule dispatch if @tg
2258         * was empty before @bio.  The forced scheduling isn't likely to
2259         * cause undue delay as @bio is likely to be dispatched directly if
2260         * its @tg's disptime is not in the future.
2261         */
2262        if (tg->flags & THROTL_TG_WAS_EMPTY) {
2263                tg_update_disptime(tg);
2264                throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2265        }
2266
2267out_unlock:
2268        spin_unlock_irq(&q->queue_lock);
2269out:
2270        bio_set_flag(bio, BIO_THROTTLED);
2271
2272#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2273        if (throttled || !td->track_bio_latency)
2274                bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2275#endif
2276        return throttled;
2277}
2278
2279#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2280static void throtl_track_latency(struct throtl_data *td, sector_t size,
2281        int op, unsigned long time)
2282{
2283        struct latency_bucket *latency;
2284        int index;
2285
2286        if (!td || td->limit_index != LIMIT_LOW ||
2287            !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2288            !blk_queue_nonrot(td->queue))
2289                return;
2290
2291        index = request_bucket_index(size);
2292
2293        latency = get_cpu_ptr(td->latency_buckets[op]);
2294        latency[index].total_latency += time;
2295        latency[index].samples++;
2296        put_cpu_ptr(td->latency_buckets[op]);
2297}
2298
2299void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2300{
2301        struct request_queue *q = rq->q;
2302        struct throtl_data *td = q->td;
2303
2304        throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2305                             time_ns >> 10);
2306}
2307
2308void blk_throtl_bio_endio(struct bio *bio)
2309{
2310        struct blkcg_gq *blkg;
2311        struct throtl_grp *tg;
2312        u64 finish_time_ns;
2313        unsigned long finish_time;
2314        unsigned long start_time;
2315        unsigned long lat;
2316        int rw = bio_data_dir(bio);
2317
2318        blkg = bio->bi_blkg;
2319        if (!blkg)
2320                return;
2321        tg = blkg_to_tg(blkg);
2322
2323        finish_time_ns = ktime_get_ns();
2324        tg->last_finish_time = finish_time_ns >> 10;
2325
2326        start_time = bio_issue_time(&bio->bi_issue) >> 10;
2327        finish_time = __bio_issue_time(finish_time_ns) >> 10;
2328        if (!start_time || finish_time <= start_time)
2329                return;
2330
2331        lat = finish_time - start_time;
2332        /* this latency tracking path is only for bio-based drivers */
2333        if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2334                throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2335                                     bio_op(bio), lat);
2336
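            /*
             * A bio counts as "bad" if its latency exceeds the bucket's
             * average plus the group's latency target; throtl_tg_is_idle()
             * treats the group as meeting its target while bad bios stay
             * under 20% of the total.
             */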
2337        if (tg->latency_target && lat >= tg->td->filtered_latency) {
2338                int bucket;
2339                unsigned int threshold;
2340
2341                bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2342                threshold = tg->td->avg_buckets[rw][bucket].latency +
2343                        tg->latency_target;
2344                if (lat > threshold)
2345                        tg->bad_bio_cnt++;
2346                /*
2347                 * Not race free; the counts could be wrong, which means
2348                 * cgroups may be throttled when they shouldn't be.
2349                 */
2350                tg->bio_cnt++;
2351        }
2352
2353        if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2354                tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2355                tg->bio_cnt /= 2;
2356                tg->bad_bio_cnt /= 2;
2357        }
2358}
2359#endif
2360
2361/*
2362 * Dispatch all bios from all children tg's queued on @parent_sq.  On
2363 * return, @parent_sq is guaranteed to not have any active children tg's
2364 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2365 */
2366static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2367{
2368        struct throtl_grp *tg;
2369
2370        while ((tg = throtl_rb_first(parent_sq))) {
2371                struct throtl_service_queue *sq = &tg->service_queue;
2372                struct bio *bio;
2373
2374                throtl_dequeue_tg(tg);
2375
2376                while ((bio = throtl_peek_queued(&sq->queued[READ])))
2377                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
2378                while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2379                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
2380        }
2381}
2382
2383/**
2384 * blk_throtl_drain - drain throttled bios
2385 * @q: request_queue to drain throttled bios for
2386 *
2387 * Dispatch all currently throttled bios on @q through ->make_request_fn().
2388 */
2389void blk_throtl_drain(struct request_queue *q)
2390        __releases(&q->queue_lock) __acquires(&q->queue_lock)
2391{
2392        struct throtl_data *td = q->td;
2393        struct blkcg_gq *blkg;
2394        struct cgroup_subsys_state *pos_css;
2395        struct bio *bio;
2396        int rw;
2397
2398        rcu_read_lock();
2399
2400        /*
2401         * Drain each tg while doing post-order walk on the blkg tree, so
2402         * that all bios are propagated to td->service_queue.  It'd be
2403         * better to walk service_queue tree directly but blkg walk is
2404         * easier.
2405         */
2406        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2407                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2408
2409        /* finally, transfer bios from top-level tg's into the td */
2410        tg_drain_bios(&td->service_queue);
2411
2412        rcu_read_unlock();
2413        spin_unlock_irq(&q->queue_lock);
2414
2415        /* all bios now should be in td->service_queue, issue them */
2416        for (rw = READ; rw <= WRITE; rw++)
2417                while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2418                                                NULL)))
2419                        generic_make_request(bio);
2420
2421        spin_lock_irq(&q->queue_lock);
2422}
2423
2424int blk_throtl_init(struct request_queue *q)
2425{
2426        struct throtl_data *td;
2427        int ret;
2428
2429        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2430        if (!td)
2431                return -ENOMEM;
2432        td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2433                LATENCY_BUCKET_SIZE, __alignof__(u64));
2434        if (!td->latency_buckets[READ]) {
2435                kfree(td);
2436                return -ENOMEM;
2437        }
2438        td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2439                LATENCY_BUCKET_SIZE, __alignof__(u64));
2440        if (!td->latency_buckets[WRITE]) {
2441                free_percpu(td->latency_buckets[READ]);
2442                kfree(td);
2443                return -ENOMEM;
2444        }
2445
2446        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2447        throtl_service_queue_init(&td->service_queue);
2448
2449        q->td = td;
2450        td->queue = q;
2451
2452        td->limit_valid[LIMIT_MAX] = true;
2453        td->limit_index = LIMIT_MAX;
2454        td->low_upgrade_time = jiffies;
2455        td->low_downgrade_time = jiffies;
2456
2457        /* activate policy */
2458        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2459        if (ret) {
2460                free_percpu(td->latency_buckets[READ]);
2461                free_percpu(td->latency_buckets[WRITE]);
2462                kfree(td);
2463        }
2464        return ret;
2465}
2466
2467void blk_throtl_exit(struct request_queue *q)
2468{
2469        BUG_ON(!q->td);
2470        throtl_shutdown_wq(q);
2471        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2472        free_percpu(q->td->latency_buckets[READ]);
2473        free_percpu(q->td->latency_buckets[WRITE]);
2474        kfree(q->td);
2475}
2476
2477void blk_throtl_register_queue(struct request_queue *q)
2478{
2479        struct throtl_data *td;
2480        int i;
2481
2482        td = q->td;
2483        BUG_ON(!td);
2484
2485        if (blk_queue_nonrot(q)) {
2486                td->throtl_slice = DFL_THROTL_SLICE_SSD;
2487                td->filtered_latency = LATENCY_FILTERED_SSD;
2488        } else {
2489                td->throtl_slice = DFL_THROTL_SLICE_HD;
2490                td->filtered_latency = LATENCY_FILTERED_HD;
2491                for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2492                        td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2493                        td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2494                }
2495        }
2496#ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2497        /* if no low limit, use previous default */
2498        td->throtl_slice = DFL_THROTL_SLICE_HD;
2499#endif
2500
2501        td->track_bio_latency = !queue_is_mq(q);
2502        if (!td->track_bio_latency)
2503                blk_stat_enable_accounting(q);
2504}
2505
2506#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
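    /*
     * Expose td->throtl_slice in milliseconds through the request queue's
     * sysfs directory (the "throttle_sample_time" attribute); values that
     * round to zero jiffies or exceed MAX_THROTL_SLICE (1s) are rejected.
     */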
2507ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2508{
2509        if (!q->td)
2510                return -EINVAL;
2511        return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2512}
2513
2514ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2515        const char *page, size_t count)
2516{
2517        unsigned long v;
2518        unsigned long t;
2519
2520        if (!q->td)
2521                return -EINVAL;
2522        if (kstrtoul(page, 10, &v))
2523                return -EINVAL;
2524        t = msecs_to_jiffies(v);
2525        if (t == 0 || t > MAX_THROTL_SLICE)
2526                return -EINVAL;
2527        q->td->throtl_slice = t;
2528        return count;
2529}
2530#endif
2531
2532static int __init throtl_init(void)
2533{
2534        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2535        if (!kthrotld_workqueue)
2536                panic("Failed to create kthrotld\n");
2537
2538        return blkcg_policy_register(&blkcg_policy_throtl);
2539}
2540
2541module_init(throtl_init);
2542