linux/block/blk-throttle.c
   1/*
   2 * Interface for controlling IO bandwidth on a request queue
   3 *
   4 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/slab.h>
   9#include <linux/blkdev.h>
  10#include <linux/bio.h>
  11#include <linux/blktrace_api.h>
  12#include <linux/blk-cgroup.h>
  13#include "blk.h"
  14
  15/* Max dispatch from a group in 1 round */
  16static int throtl_grp_quantum = 8;
  17
  18/* Total max dispatch from all groups in one round */
  19static int throtl_quantum = 32;
  20
   21/* Throttling is performed over a 100ms slice, after which the slice is renewed */
  22static unsigned long throtl_slice = HZ/10;      /* 100 ms */
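     /* e.g. with HZ=250 this is 25 jiffies, with HZ=1000 it is 100 jiffies */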
  23
  24static struct blkcg_policy blkcg_policy_throtl;
  25
  26/* A workqueue to queue throttle related work */
  27static struct workqueue_struct *kthrotld_workqueue;
  28
  29/*
  30 * To implement hierarchical throttling, throtl_grps form a tree and bios
  31 * are dispatched upwards level by level until they reach the top and get
  32 * issued.  When dispatching bios from the children and local group at each
  33 * level, if the bios are dispatched into a single bio_list, there's a risk
   34 * that a local or child group which can queue many bios at once fills up
   35 * the list, starving others.
  36 *
  37 * To avoid such starvation, dispatched bios are queued separately
  38 * according to where they came from.  When they are again dispatched to
  39 * the parent, they're popped in round-robin order so that no single source
  40 * hogs the dispatch window.
  41 *
  42 * throtl_qnode is used to keep the queued bios separated by their sources.
  43 * Bios are queued to throtl_qnode which in turn is queued to
  44 * throtl_service_queue and then dispatched in round-robin order.
  45 *
  46 * It's also used to track the reference counts on blkg's.  A qnode always
  47 * belongs to a throtl_grp and gets queued on itself or the parent, so
  48 * incrementing the reference of the associated throtl_grp when a qnode is
  49 * queued and decrementing when dequeued is enough to keep the whole blkg
  50 * tree pinned while bios are in flight.
  51 */
  52struct throtl_qnode {
  53        struct list_head        node;           /* service_queue->queued[] */
  54        struct bio_list         bios;           /* queued bios */
  55        struct throtl_grp       *tg;            /* tg this qnode belongs to */
  56};
  57
  58struct throtl_service_queue {
  59        struct throtl_service_queue *parent_sq; /* the parent service_queue */
  60
  61        /*
  62         * Bios queued directly to this service_queue or dispatched from
  63         * children throtl_grp's.
  64         */
  65        struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
  66        unsigned int            nr_queued[2];   /* number of queued bios */
  67
  68        /*
  69         * RB tree of active children throtl_grp's, which are sorted by
  70         * their ->disptime.
  71         */
  72        struct rb_root          pending_tree;   /* RB tree of active tgs */
  73        struct rb_node          *first_pending; /* first node in the tree */
  74        unsigned int            nr_pending;     /* # queued in the tree */
  75        unsigned long           first_pending_disptime; /* disptime of the first tg */
  76        struct timer_list       pending_timer;  /* fires on first_pending_disptime */
  77};
  78
  79enum tg_state_flags {
  80        THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
  81        THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
  82};
  83
  84#define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
  85
  86struct throtl_grp {
  87        /* must be the first member */
  88        struct blkg_policy_data pd;
  89
  90        /* active throtl group service_queue member */
  91        struct rb_node rb_node;
  92
  93        /* throtl_data this group belongs to */
  94        struct throtl_data *td;
  95
  96        /* this group's service queue */
  97        struct throtl_service_queue service_queue;
  98
  99        /*
 100         * qnode_on_self is used when bios are directly queued to this
 101         * throtl_grp so that local bios compete fairly with bios
 102         * dispatched from children.  qnode_on_parent is used when bios are
 103         * dispatched from this throtl_grp into its parent and will compete
 104         * with the sibling qnode_on_parents and the parent's
 105         * qnode_on_self.
 106         */
 107        struct throtl_qnode qnode_on_self[2];
 108        struct throtl_qnode qnode_on_parent[2];
 109
 110        /*
  111         * Dispatch time in jiffies. This is the estimated time when the
  112         * group will unthrottle and be ready to dispatch more bios. It is
  113         * used as the key to sort active groups in the service tree.
 114         */
 115        unsigned long disptime;
 116
 117        unsigned int flags;
 118
 119        /* are there any throtl rules between this group and td? */
 120        bool has_rules[2];
 121
 122        /* bytes per second rate limits */
 123        uint64_t bps[2];
 124
 125        /* IOPS limits */
 126        unsigned int iops[2];
 127
  128        /* Number of bytes dispatched in current slice */
 129        uint64_t bytes_disp[2];
 130        /* Number of bio's dispatched in current slice */
 131        unsigned int io_disp[2];
 132
 133        /* When did we start a new slice */
 134        unsigned long slice_start[2];
 135        unsigned long slice_end[2];
 136};
 137
 138struct throtl_data
 139{
 140        /* service tree for active throtl groups */
 141        struct throtl_service_queue service_queue;
 142
 143        struct request_queue *queue;
 144
  145        /* Total number of queued bios on READ and WRITE lists */
 146        unsigned int nr_queued[2];
 147
 148        /* Work for dispatching throttled bios */
 149        struct work_struct dispatch_work;
 150};
 151
 152static void throtl_pending_timer_fn(unsigned long arg);
 153
 154static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
 155{
 156        return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
 157}
 158
 159static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
 160{
 161        return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
 162}
 163
 164static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
 165{
 166        return pd_to_blkg(&tg->pd);
 167}
 168
 169/**
  170 * sq_to_tg - return the throtl_grp the specified service queue belongs to
 171 * @sq: the throtl_service_queue of interest
 172 *
 173 * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
 174 * embedded in throtl_data, %NULL is returned.
 175 */
 176static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
 177{
 178        if (sq && sq->parent_sq)
 179                return container_of(sq, struct throtl_grp, service_queue);
 180        else
 181                return NULL;
 182}
 183
 184/**
 185 * sq_to_td - return throtl_data the specified service queue belongs to
 186 * @sq: the throtl_service_queue of interest
 187 *
  188 * A service_queue can be embedded in either a throtl_grp or throtl_data.
 189 * Determine the associated throtl_data accordingly and return it.
 190 */
 191static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 192{
 193        struct throtl_grp *tg = sq_to_tg(sq);
 194
 195        if (tg)
 196                return tg->td;
 197        else
 198                return container_of(sq, struct throtl_data, service_queue);
 199}
 200
 201/**
 202 * throtl_log - log debug message via blktrace
 203 * @sq: the service_queue being reported
 204 * @fmt: printf format string
 205 * @args: printf args
 206 *
 207 * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
 208 * throtl_grp; otherwise, just "throtl".
 209 */
 210#define throtl_log(sq, fmt, args...)    do {                            \
 211        struct throtl_grp *__tg = sq_to_tg((sq));                       \
 212        struct throtl_data *__td = sq_to_td((sq));                      \
 213                                                                        \
 214        (void)__td;                                                     \
 215        if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
 216                break;                                                  \
 217        if ((__tg)) {                                                   \
 218                char __pbuf[128];                                       \
 219                                                                        \
 220                blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
 221                blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
 222        } else {                                                        \
 223                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
 224        }                                                               \
 225} while (0)
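     /*
      * Illustrative use (hypothetical call, not a call site in this file):
      * with blktrace message notes enabled on the queue,
      *
      *	throtl_log(&tg->service_queue, "[%c] sample", rw == READ ? 'R' : 'W');
      *
      * is emitted as "throtl <cgroup path> [R] sample" for a throtl_grp's
      * service_queue, or with just the "throtl " prefix for the top-level
      * service_queue embedded in throtl_data.
      */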
 226
 227static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 228{
 229        INIT_LIST_HEAD(&qn->node);
 230        bio_list_init(&qn->bios);
 231        qn->tg = tg;
 232}
 233
 234/**
 235 * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
 236 * @bio: bio being added
 237 * @qn: qnode to add bio to
 238 * @queued: the service_queue->queued[] list @qn belongs to
 239 *
 240 * Add @bio to @qn and put @qn on @queued if it's not already on.
 241 * @qn->tg's reference count is bumped when @qn is activated.  See the
 242 * comment on top of throtl_qnode definition for details.
 243 */
 244static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
 245                                 struct list_head *queued)
 246{
 247        bio_list_add(&qn->bios, bio);
 248        if (list_empty(&qn->node)) {
 249                list_add_tail(&qn->node, queued);
 250                blkg_get(tg_to_blkg(qn->tg));
 251        }
 252}
 253
 254/**
 255 * throtl_peek_queued - peek the first bio on a qnode list
 256 * @queued: the qnode list to peek
 257 */
 258static struct bio *throtl_peek_queued(struct list_head *queued)
 259{
 260        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 261        struct bio *bio;
 262
 263        if (list_empty(queued))
 264                return NULL;
 265
 266        bio = bio_list_peek(&qn->bios);
 267        WARN_ON_ONCE(!bio);
 268        return bio;
 269}
 270
 271/**
  272 * throtl_pop_queued - pop the first bio from a qnode list
 273 * @queued: the qnode list to pop a bio from
 274 * @tg_to_put: optional out argument for throtl_grp to put
 275 *
 276 * Pop the first bio from the qnode list @queued.  After popping, the first
 277 * qnode is removed from @queued if empty or moved to the end of @queued so
 278 * that the popping order is round-robin.
 279 *
 280 * When the first qnode is removed, its associated throtl_grp should be put
 281 * too.  If @tg_to_put is NULL, this function automatically puts it;
 282 * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
 283 * responsible for putting it.
 284 */
 285static struct bio *throtl_pop_queued(struct list_head *queued,
 286                                     struct throtl_grp **tg_to_put)
 287{
 288        struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
 289        struct bio *bio;
 290
 291        if (list_empty(queued))
 292                return NULL;
 293
 294        bio = bio_list_pop(&qn->bios);
 295        WARN_ON_ONCE(!bio);
 296
 297        if (bio_list_empty(&qn->bios)) {
 298                list_del_init(&qn->node);
 299                if (tg_to_put)
 300                        *tg_to_put = qn->tg;
 301                else
 302                        blkg_put(tg_to_blkg(qn->tg));
 303        } else {
 304                list_move_tail(&qn->node, queued);
 305        }
 306
 307        return bio;
 308}
 309
 310/* init a service_queue, assumes the caller zeroed it */
 311static void throtl_service_queue_init(struct throtl_service_queue *sq)
 312{
 313        INIT_LIST_HEAD(&sq->queued[0]);
 314        INIT_LIST_HEAD(&sq->queued[1]);
 315        sq->pending_tree = RB_ROOT;
 316        setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
 317                    (unsigned long)sq);
 318}
 319
 320static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 321{
 322        struct throtl_grp *tg;
 323        int rw;
 324
 325        tg = kzalloc_node(sizeof(*tg), gfp, node);
 326        if (!tg)
 327                return NULL;
 328
 329        throtl_service_queue_init(&tg->service_queue);
 330
 331        for (rw = READ; rw <= WRITE; rw++) {
 332                throtl_qnode_init(&tg->qnode_on_self[rw], tg);
 333                throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
 334        }
 335
 336        RB_CLEAR_NODE(&tg->rb_node);
 337        tg->bps[READ] = -1;
 338        tg->bps[WRITE] = -1;
 339        tg->iops[READ] = -1;
 340        tg->iops[WRITE] = -1;
 341
 342        return &tg->pd;
 343}
 344
 345static void throtl_pd_init(struct blkg_policy_data *pd)
 346{
 347        struct throtl_grp *tg = pd_to_tg(pd);
 348        struct blkcg_gq *blkg = tg_to_blkg(tg);
 349        struct throtl_data *td = blkg->q->td;
 350        struct throtl_service_queue *sq = &tg->service_queue;
 351
 352        /*
 353         * If on the default hierarchy, we switch to properly hierarchical
 354         * behavior where limits on a given throtl_grp are applied to the
 355         * whole subtree rather than just the group itself.  e.g. If 16M
 356         * read_bps limit is set on the root group, the whole system can't
 357         * exceed 16M for the device.
 358         *
 359         * If not on the default hierarchy, the broken flat hierarchy
 360         * behavior is retained where all throtl_grps are treated as if
 361         * they're all separate root groups right below throtl_data.
 362         * Limits of a group don't interact with limits of other groups
 363         * regardless of the position of the group in the hierarchy.
 364         */
 365        sq->parent_sq = &td->service_queue;
 366        if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
 367                sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 368        tg->td = td;
 369}
 370
 371/*
 372 * Set has_rules[] if @tg or any of its parents have limits configured.
 373 * This doesn't require walking up to the top of the hierarchy as the
 374 * parent's has_rules[] is guaranteed to be correct.
 375 */
 376static void tg_update_has_rules(struct throtl_grp *tg)
 377{
 378        struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
 379        int rw;
 380
 381        for (rw = READ; rw <= WRITE; rw++)
 382                tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
 383                                    (tg->bps[rw] != -1 || tg->iops[rw] != -1);
 384}
 385
 386static void throtl_pd_online(struct blkg_policy_data *pd)
 387{
 388        /*
  389         * We don't want new groups to escape the limits of their ancestors.
 390         * Update has_rules[] after a new group is brought online.
 391         */
 392        tg_update_has_rules(pd_to_tg(pd));
 393}
 394
 395static void throtl_pd_free(struct blkg_policy_data *pd)
 396{
 397        struct throtl_grp *tg = pd_to_tg(pd);
 398
 399        del_timer_sync(&tg->service_queue.pending_timer);
 400        kfree(tg);
 401}
 402
 403static struct throtl_grp *
 404throtl_rb_first(struct throtl_service_queue *parent_sq)
 405{
 406        /* Service tree is empty */
 407        if (!parent_sq->nr_pending)
 408                return NULL;
 409
 410        if (!parent_sq->first_pending)
 411                parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
 412
 413        if (parent_sq->first_pending)
 414                return rb_entry_tg(parent_sq->first_pending);
 415
 416        return NULL;
 417}
 418
 419static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 420{
 421        rb_erase(n, root);
 422        RB_CLEAR_NODE(n);
 423}
 424
 425static void throtl_rb_erase(struct rb_node *n,
 426                            struct throtl_service_queue *parent_sq)
 427{
 428        if (parent_sq->first_pending == n)
 429                parent_sq->first_pending = NULL;
 430        rb_erase_init(n, &parent_sq->pending_tree);
 431        --parent_sq->nr_pending;
 432}
 433
 434static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 435{
 436        struct throtl_grp *tg;
 437
 438        tg = throtl_rb_first(parent_sq);
 439        if (!tg)
 440                return;
 441
 442        parent_sq->first_pending_disptime = tg->disptime;
 443}
 444
 445static void tg_service_queue_add(struct throtl_grp *tg)
 446{
 447        struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
 448        struct rb_node **node = &parent_sq->pending_tree.rb_node;
 449        struct rb_node *parent = NULL;
 450        struct throtl_grp *__tg;
 451        unsigned long key = tg->disptime;
 452        int left = 1;
 453
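             /*
              * Walk down the pending_tree keyed by ->disptime.  @left stays
              * set only while we keep descending to the left, i.e. when @tg
              * sorts before every existing node and becomes the new cached
              * leftmost (first_pending).
              */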
 454        while (*node != NULL) {
 455                parent = *node;
 456                __tg = rb_entry_tg(parent);
 457
 458                if (time_before(key, __tg->disptime))
 459                        node = &parent->rb_left;
 460                else {
 461                        node = &parent->rb_right;
 462                        left = 0;
 463                }
 464        }
 465
 466        if (left)
 467                parent_sq->first_pending = &tg->rb_node;
 468
 469        rb_link_node(&tg->rb_node, parent, node);
 470        rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
 471}
 472
 473static void __throtl_enqueue_tg(struct throtl_grp *tg)
 474{
 475        tg_service_queue_add(tg);
 476        tg->flags |= THROTL_TG_PENDING;
 477        tg->service_queue.parent_sq->nr_pending++;
 478}
 479
 480static void throtl_enqueue_tg(struct throtl_grp *tg)
 481{
 482        if (!(tg->flags & THROTL_TG_PENDING))
 483                __throtl_enqueue_tg(tg);
 484}
 485
 486static void __throtl_dequeue_tg(struct throtl_grp *tg)
 487{
 488        throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
 489        tg->flags &= ~THROTL_TG_PENDING;
 490}
 491
 492static void throtl_dequeue_tg(struct throtl_grp *tg)
 493{
 494        if (tg->flags & THROTL_TG_PENDING)
 495                __throtl_dequeue_tg(tg);
 496}
 497
 498/* Call with queue lock held */
 499static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 500                                          unsigned long expires)
 501{
 502        mod_timer(&sq->pending_timer, expires);
 503        throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
 504                   expires - jiffies, jiffies);
 505}
 506
 507/**
 508 * throtl_schedule_next_dispatch - schedule the next dispatch cycle
 509 * @sq: the service_queue to schedule dispatch for
 510 * @force: force scheduling
 511 *
 512 * Arm @sq->pending_timer so that the next dispatch cycle starts on the
 513 * dispatch time of the first pending child.  Returns %true if either timer
 514 * is armed or there's no pending child left.  %false if the current
 515 * dispatch window is still open and the caller should continue
 516 * dispatching.
 517 *
 518 * If @force is %true, the dispatch timer is always scheduled and this
 519 * function is guaranteed to return %true.  This is to be used when the
 520 * caller can't dispatch itself and needs to invoke pending_timer
 521 * unconditionally.  Note that forced scheduling is likely to induce short
 522 * delay before dispatch starts even if @sq->first_pending_disptime is not
 523 * in the future and thus shouldn't be used in hot paths.
 524 */
 525static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
 526                                          bool force)
 527{
 528        /* any pending children left? */
 529        if (!sq->nr_pending)
 530                return true;
 531
 532        update_min_dispatch_time(sq);
 533
 534        /* is the next dispatch time in the future? */
 535        if (force || time_after(sq->first_pending_disptime, jiffies)) {
 536                throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
 537                return true;
 538        }
 539
 540        /* tell the caller to continue dispatching */
 541        return false;
 542}
 543
 544static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
 545                bool rw, unsigned long start)
 546{
 547        tg->bytes_disp[rw] = 0;
 548        tg->io_disp[rw] = 0;
 549
 550        /*
  551         * Previous slice has expired.  We must have trimmed it after the
  552         * last bio dispatch, which means that bandwidth has gone unused
  553         * since the start of that slice.  Try to make use of it by giving
  554         * credit.
 555         */
 556        if (time_after_eq(start, tg->slice_start[rw]))
 557                tg->slice_start[rw] = start;
 558
 559        tg->slice_end[rw] = jiffies + throtl_slice;
 560        throtl_log(&tg->service_queue,
 561                   "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
 562                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 563                   tg->slice_end[rw], jiffies);
 564}
 565
 566static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 567{
 568        tg->bytes_disp[rw] = 0;
 569        tg->io_disp[rw] = 0;
 570        tg->slice_start[rw] = jiffies;
 571        tg->slice_end[rw] = jiffies + throtl_slice;
 572        throtl_log(&tg->service_queue,
 573                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
 574                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 575                   tg->slice_end[rw], jiffies);
 576}
 577
 578static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 579                                        unsigned long jiffy_end)
 580{
 581        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 582}
 583
 584static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
 585                                       unsigned long jiffy_end)
 586{
 587        tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
 588        throtl_log(&tg->service_queue,
 589                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
 590                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
 591                   tg->slice_end[rw], jiffies);
 592}
 593
 594/* Determine if previously allocated or extended slice is complete or not */
 595static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
 596{
 597        if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
 598                return false;
 599
  600        return true;
 601}
 602
 603/* Trim the used slices and adjust slice start accordingly */
 604static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 605{
 606        unsigned long nr_slices, time_elapsed, io_trim;
 607        u64 bytes_trim, tmp;
 608
 609        BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
 610
 611        /*
  612         * If bps are unlimited (-1), then time slices don't get
  613         * renewed. Don't try to trim the slice if the slice is used up. A
  614         * new slice will start when appropriate.
 615         */
 616        if (throtl_slice_used(tg, rw))
 617                return;
 618
 619        /*
 620         * A bio has been dispatched. Also adjust slice_end. It might happen
 621         * that initially cgroup limit was very low resulting in high
  622         * slice_end, but later limit was bumped up and bio was dispatched
 623         * sooner, then we need to reduce slice_end. A high bogus slice_end
 624         * is bad because it does not allow new slice to start.
 625         */
 626
 627        throtl_set_slice_end(tg, rw, jiffies + throtl_slice);
 628
 629        time_elapsed = jiffies - tg->slice_start[rw];
 630
 631        nr_slices = time_elapsed / throtl_slice;
 632
 633        if (!nr_slices)
 634                return;
 635        tmp = tg->bps[rw] * throtl_slice * nr_slices;
 636        do_div(tmp, HZ);
 637        bytes_trim = tmp;
 638
 639        io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;
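             /*
              * Worked example with illustrative numbers: HZ=1000,
              * throtl_slice=100 jiffies, nr_slices=1.  With bps=1048576 the
              * trimmed credit is 1048576 * 100 / 1000 = ~104857 bytes; with
              * iops=100, io_trim is 100 * 100 / 1000 = 10 ios.
              */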
 640
 641        if (!bytes_trim && !io_trim)
 642                return;
 643
 644        if (tg->bytes_disp[rw] >= bytes_trim)
 645                tg->bytes_disp[rw] -= bytes_trim;
 646        else
 647                tg->bytes_disp[rw] = 0;
 648
 649        if (tg->io_disp[rw] >= io_trim)
 650                tg->io_disp[rw] -= io_trim;
 651        else
 652                tg->io_disp[rw] = 0;
 653
 654        tg->slice_start[rw] += nr_slices * throtl_slice;
 655
 656        throtl_log(&tg->service_queue,
 657                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
 658                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
 659                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 660}
 661
 662static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 663                                  unsigned long *wait)
 664{
 665        bool rw = bio_data_dir(bio);
 666        unsigned int io_allowed;
 667        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 668        u64 tmp;
 669
 670        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 671
 672        /* Slice has just started. Consider one slice interval */
 673        if (!jiffy_elapsed)
 674                jiffy_elapsed_rnd = throtl_slice;
 675
 676        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 677
 678        /*
  679         * jiffy_elapsed_rnd should not be a big value: the minimum iops is
  680         * 1, so at most jiffy_elapsed should be equivalent to 1 second, as
  681         * we will allow dispatch after 1 second and by then the slice
  682         * should have been trimmed.
 683         */
 684
 685        tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
 686        do_div(tmp, HZ);
 687
 688        if (tmp > UINT_MAX)
 689                io_allowed = UINT_MAX;
 690        else
 691                io_allowed = tmp;
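             /*
              * E.g. with illustrative numbers iops=100, HZ=1000 and
              * jiffy_elapsed_rnd=100, io_allowed = 100 * 100 / 1000 = 10 ios.
              */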
 692
 693        if (tg->io_disp[rw] + 1 <= io_allowed) {
 694                if (wait)
 695                        *wait = 0;
 696                return true;
 697        }
 698
 699        /* Calc approx time to dispatch */
 700        jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;
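             /*
              * E.g. (illustrative) io_disp=10, iops=100, HZ=1000 gives
              * (10 + 1) * 1000 / 100 + 1 = 111 jiffies from slice start; the
              * already elapsed time is subtracted below.
              */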
 701
 702        if (jiffy_wait > jiffy_elapsed)
 703                jiffy_wait = jiffy_wait - jiffy_elapsed;
 704        else
 705                jiffy_wait = 1;
 706
 707        if (wait)
 708                *wait = jiffy_wait;
  709        return false;
 710}
 711
 712static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 713                                 unsigned long *wait)
 714{
 715        bool rw = bio_data_dir(bio);
 716        u64 bytes_allowed, extra_bytes, tmp;
 717        unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 718
 719        jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
 720
 721        /* Slice has just started. Consider one slice interval */
 722        if (!jiffy_elapsed)
 723                jiffy_elapsed_rnd = throtl_slice;
 724
 725        jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);
 726
 727        tmp = tg->bps[rw] * jiffy_elapsed_rnd;
 728        do_div(tmp, HZ);
 729        bytes_allowed = tmp;
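             /*
              * E.g. with illustrative numbers bps=1048576, HZ=1000 and
              * jiffy_elapsed_rnd=100, bytes_allowed is about 104857 bytes.
              */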
 730
 731        if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 732                if (wait)
 733                        *wait = 0;
 734                return true;
 735        }
 736
 737        /* Calc approx time to dispatch */
 738        extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 739        jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
 740
 741        if (!jiffy_wait)
 742                jiffy_wait = 1;
 743
 744        /*
  745         * This wait time doesn't take into account the rounding up we did
  746         * above. Add that time as well.
 747         */
 748        jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
 749        if (wait)
 750                *wait = jiffy_wait;
  751        return false;
 752}
 753
 754/*
 755 * Returns whether one can dispatch a bio or not. Also returns approx number
  756 * of jiffies to wait before this bio is within the IO rate and can be dispatched
 757 */
 758static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 759                            unsigned long *wait)
 760{
 761        bool rw = bio_data_dir(bio);
 762        unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
 763
 764        /*
  765         * Currently the whole state machine of the group depends on the
  766         * first bio queued in the group's bio list. So one should not call
  767         * this function with a different bio if there are other bios
  768         * queued.
 769         */
 770        BUG_ON(tg->service_queue.nr_queued[rw] &&
 771               bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
 772
 773        /* If tg->bps = -1, then BW is unlimited */
 774        if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
 775                if (wait)
 776                        *wait = 0;
 777                return true;
 778        }
 779
 780        /*
 781         * If previous slice expired, start a new one otherwise renew/extend
 782         * existing slice to make sure it is at least throtl_slice interval
 783         * long since now. New slice is started only for empty throttle group.
 784         * If there is queued bio, that means there should be an active
 785         * slice and it should be extended instead.
 786         */
 787        if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 788                throtl_start_new_slice(tg, rw);
 789        else {
 790                if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
 791                        throtl_extend_slice(tg, rw, jiffies + throtl_slice);
 792        }
 793
 794        if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
 795            tg_with_in_iops_limit(tg, bio, &iops_wait)) {
 796                if (wait)
 797                        *wait = 0;
  798                return true;
 799        }
 800
 801        max_wait = max(bps_wait, iops_wait);
 802
 803        if (wait)
 804                *wait = max_wait;
 805
 806        if (time_before(tg->slice_end[rw], jiffies + max_wait))
 807                throtl_extend_slice(tg, rw, jiffies + max_wait);
 808
  809        return false;
 810}
 811
 812static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 813{
 814        bool rw = bio_data_dir(bio);
 815
 816        /* Charge the bio to the group */
 817        tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 818        tg->io_disp[rw]++;
 819
 820        /*
  821         * REQ_THROTTLED is used to prevent the same bio from being throttled
  822         * more than once, as a throttled bio will go through blk-throtl a
 823         * second time when it eventually gets issued.  Set it when a bio
 824         * is being charged to a tg.
 825         */
 826        if (!(bio->bi_opf & REQ_THROTTLED))
 827                bio->bi_opf |= REQ_THROTTLED;
 828}
 829
 830/**
 831 * throtl_add_bio_tg - add a bio to the specified throtl_grp
 832 * @bio: bio to add
 833 * @qn: qnode to use
 834 * @tg: the target throtl_grp
 835 *
 836 * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
 837 * tg->qnode_on_self[] is used.
 838 */
 839static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
 840                              struct throtl_grp *tg)
 841{
 842        struct throtl_service_queue *sq = &tg->service_queue;
 843        bool rw = bio_data_dir(bio);
 844
 845        if (!qn)
 846                qn = &tg->qnode_on_self[rw];
 847
 848        /*
 849         * If @tg doesn't currently have any bios queued in the same
 850         * direction, queueing @bio can change when @tg should be
 851         * dispatched.  Mark that @tg was empty.  This is automatically
  852         * cleared on the next tg_update_disptime().
 853         */
 854        if (!sq->nr_queued[rw])
 855                tg->flags |= THROTL_TG_WAS_EMPTY;
 856
 857        throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
 858
 859        sq->nr_queued[rw]++;
 860        throtl_enqueue_tg(tg);
 861}
 862
 863static void tg_update_disptime(struct throtl_grp *tg)
 864{
 865        struct throtl_service_queue *sq = &tg->service_queue;
 866        unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 867        struct bio *bio;
 868
 869        if ((bio = throtl_peek_queued(&sq->queued[READ])))
 870                tg_may_dispatch(tg, bio, &read_wait);
 871
 872        if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
 873                tg_may_dispatch(tg, bio, &write_wait);
 874
 875        min_wait = min(read_wait, write_wait);
 876        disptime = jiffies + min_wait;
 877
 878        /* Update dispatch time */
 879        throtl_dequeue_tg(tg);
 880        tg->disptime = disptime;
 881        throtl_enqueue_tg(tg);
 882
 883        /* see throtl_add_bio_tg() */
 884        tg->flags &= ~THROTL_TG_WAS_EMPTY;
 885}
 886
 887static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
 888                                        struct throtl_grp *parent_tg, bool rw)
 889{
 890        if (throtl_slice_used(parent_tg, rw)) {
 891                throtl_start_new_slice_with_credit(parent_tg, rw,
 892                                child_tg->slice_start[rw]);
 893        }
 894
 895}
 896
 897static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 898{
 899        struct throtl_service_queue *sq = &tg->service_queue;
 900        struct throtl_service_queue *parent_sq = sq->parent_sq;
 901        struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
 902        struct throtl_grp *tg_to_put = NULL;
 903        struct bio *bio;
 904
 905        /*
 906         * @bio is being transferred from @tg to @parent_sq.  Popping a bio
 907         * from @tg may put its reference and @parent_sq might end up
 908         * getting released prematurely.  Remember the tg to put and put it
 909         * after @bio is transferred to @parent_sq.
 910         */
 911        bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 912        sq->nr_queued[rw]--;
 913
 914        throtl_charge_bio(tg, bio);
 915
 916        /*
 917         * If our parent is another tg, we just need to transfer @bio to
 918         * the parent using throtl_add_bio_tg().  If our parent is
 919         * @td->service_queue, @bio is ready to be issued.  Put it on its
 920         * bio_lists[] and decrease total number queued.  The caller is
 921         * responsible for issuing these bios.
 922         */
 923        if (parent_tg) {
 924                throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
 925                start_parent_slice_with_credit(tg, parent_tg, rw);
 926        } else {
 927                throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
 928                                     &parent_sq->queued[rw]);
 929                BUG_ON(tg->td->nr_queued[rw] <= 0);
 930                tg->td->nr_queued[rw]--;
 931        }
 932
 933        throtl_trim_slice(tg, rw);
 934
 935        if (tg_to_put)
 936                blkg_put(tg_to_blkg(tg_to_put));
 937}
 938
 939static int throtl_dispatch_tg(struct throtl_grp *tg)
 940{
 941        struct throtl_service_queue *sq = &tg->service_queue;
 942        unsigned int nr_reads = 0, nr_writes = 0;
 943        unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 944        unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
 945        struct bio *bio;
 946
 947        /* Try to dispatch 75% READS and 25% WRITES */
 948
 949        while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
 950               tg_may_dispatch(tg, bio, NULL)) {
 951
 952                tg_dispatch_one_bio(tg, bio_data_dir(bio));
 953                nr_reads++;
 954
 955                if (nr_reads >= max_nr_reads)
 956                        break;
 957        }
 958
 959        while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
 960               tg_may_dispatch(tg, bio, NULL)) {
 961
 962                tg_dispatch_one_bio(tg, bio_data_dir(bio));
 963                nr_writes++;
 964
 965                if (nr_writes >= max_nr_writes)
 966                        break;
 967        }
 968
 969        return nr_reads + nr_writes;
 970}
 971
 972static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
 973{
 974        unsigned int nr_disp = 0;
 975
 976        while (1) {
 977                struct throtl_grp *tg = throtl_rb_first(parent_sq);
  978                struct throtl_service_queue *sq;
  979
  980                if (!tg)
  981                        break;
  982
  983                if (time_before(jiffies, tg->disptime))
  984                        break;
  985
  986                throtl_dequeue_tg(tg);
  987
  988                nr_disp += throtl_dispatch_tg(tg);
  989                sq = &tg->service_queue;
  990                if (sq->nr_queued[0] || sq->nr_queued[1])
 991                        tg_update_disptime(tg);
 992
 993                if (nr_disp >= throtl_quantum)
 994                        break;
 995        }
 996
 997        return nr_disp;
 998}
 999
1000/**
1001 * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1002 * @arg: the throtl_service_queue being serviced
1003 *
 1004 * This timer is armed when a child throtl_grp with active bio's becomes
1005 * pending and queued on the service_queue's pending_tree and expires when
1006 * the first child throtl_grp should be dispatched.  This function
1007 * dispatches bio's from the children throtl_grps to the parent
1008 * service_queue.
1009 *
1010 * If the parent's parent is another throtl_grp, dispatching is propagated
1011 * by either arming its pending_timer or repeating dispatch directly.  If
1012 * the top-level service_tree is reached, throtl_data->dispatch_work is
1013 * kicked so that the ready bio's are issued.
1014 */
1015static void throtl_pending_timer_fn(unsigned long arg)
1016{
1017        struct throtl_service_queue *sq = (void *)arg;
1018        struct throtl_grp *tg = sq_to_tg(sq);
1019        struct throtl_data *td = sq_to_td(sq);
1020        struct request_queue *q = td->queue;
1021        struct throtl_service_queue *parent_sq;
1022        bool dispatched;
1023        int ret;
1024
1025        spin_lock_irq(q->queue_lock);
1026again:
1027        parent_sq = sq->parent_sq;
1028        dispatched = false;
1029
1030        while (true) {
1031                throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1032                           sq->nr_queued[READ] + sq->nr_queued[WRITE],
1033                           sq->nr_queued[READ], sq->nr_queued[WRITE]);
1034
1035                ret = throtl_select_dispatch(sq);
1036                if (ret) {
1037                        throtl_log(sq, "bios disp=%u", ret);
1038                        dispatched = true;
1039                }
1040
1041                if (throtl_schedule_next_dispatch(sq, false))
1042                        break;
1043
 1044                /* this dispatch window is still open, relax and repeat */
1045                spin_unlock_irq(q->queue_lock);
1046                cpu_relax();
1047                spin_lock_irq(q->queue_lock);
1048        }
1049
1050        if (!dispatched)
1051                goto out_unlock;
1052
1053        if (parent_sq) {
 1054                /* @parent_sq is another throtl_grp, propagate dispatch */
1055                if (tg->flags & THROTL_TG_WAS_EMPTY) {
1056                        tg_update_disptime(tg);
1057                        if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1058                                /* window is already open, repeat dispatching */
1059                                sq = parent_sq;
1060                                tg = sq_to_tg(sq);
1061                                goto again;
1062                        }
1063                }
1064        } else {
 1065                /* reached the top-level, queue issuing */
1066                queue_work(kthrotld_workqueue, &td->dispatch_work);
1067        }
1068out_unlock:
1069        spin_unlock_irq(q->queue_lock);
1070}
1071
1072/**
1073 * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1074 * @work: work item being executed
1075 *
1076 * This function is queued for execution when bio's reach the bio_lists[]
1077 * of throtl_data->service_queue.  Those bio's are ready and issued by this
1078 * function.
1079 */
1080static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1081{
1082        struct throtl_data *td = container_of(work, struct throtl_data,
1083                                              dispatch_work);
1084        struct throtl_service_queue *td_sq = &td->service_queue;
1085        struct request_queue *q = td->queue;
1086        struct bio_list bio_list_on_stack;
1087        struct bio *bio;
1088        struct blk_plug plug;
1089        int rw;
1090
1091        bio_list_init(&bio_list_on_stack);
1092
1093        spin_lock_irq(q->queue_lock);
1094        for (rw = READ; rw <= WRITE; rw++)
1095                while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1096                        bio_list_add(&bio_list_on_stack, bio);
1097        spin_unlock_irq(q->queue_lock);
1098
1099        if (!bio_list_empty(&bio_list_on_stack)) {
1100                blk_start_plug(&plug);
 1101                while ((bio = bio_list_pop(&bio_list_on_stack)))
1102                        generic_make_request(bio);
1103                blk_finish_plug(&plug);
1104        }
1105}
1106
1107static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1108                              int off)
1109{
1110        struct throtl_grp *tg = pd_to_tg(pd);
1111        u64 v = *(u64 *)((void *)tg + off);
1112
1113        if (v == -1)
1114                return 0;
1115        return __blkg_prfill_u64(sf, pd, v);
1116}
1117
1118static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1119                               int off)
1120{
1121        struct throtl_grp *tg = pd_to_tg(pd);
1122        unsigned int v = *(unsigned int *)((void *)tg + off);
1123
1124        if (v == -1)
1125                return 0;
1126        return __blkg_prfill_u64(sf, pd, v);
1127}
1128
1129static int tg_print_conf_u64(struct seq_file *sf, void *v)
1130{
1131        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1132                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1133        return 0;
1134}
1135
1136static int tg_print_conf_uint(struct seq_file *sf, void *v)
1137{
1138        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1139                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1140        return 0;
1141}
1142
1143static void tg_conf_updated(struct throtl_grp *tg)
1144{
1145        struct throtl_service_queue *sq = &tg->service_queue;
1146        struct cgroup_subsys_state *pos_css;
1147        struct blkcg_gq *blkg;
1148
1149        throtl_log(&tg->service_queue,
1150                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1151                   tg->bps[READ], tg->bps[WRITE],
1152                   tg->iops[READ], tg->iops[WRITE]);
1153
1154        /*
1155         * Update has_rules[] flags for the updated tg's subtree.  A tg is
1156         * considered to have rules if either the tg itself or any of its
1157         * ancestors has rules.  This identifies groups without any
1158         * restrictions in the whole hierarchy and allows them to bypass
1159         * blk-throttle.
1160         */
1161        blkg_for_each_descendant_pre(blkg, pos_css, tg_to_blkg(tg))
1162                tg_update_has_rules(blkg_to_tg(blkg));
1163
1164        /*
1165         * We're already holding queue_lock and know @tg is valid.  Let's
1166         * apply the new config directly.
1167         *
 1168         * Restart the slices for both READ and WRITE. It might happen
 1169         * that a group's limits are dropped suddenly and we don't want to
1170         * account recently dispatched IO with new low rate.
1171         */
1172        throtl_start_new_slice(tg, 0);
1173        throtl_start_new_slice(tg, 1);
1174
1175        if (tg->flags & THROTL_TG_PENDING) {
1176                tg_update_disptime(tg);
1177                throtl_schedule_next_dispatch(sq->parent_sq, true);
1178        }
1179}
1180
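     /*
      * Common write handler for the legacy per-direction limit files.  Each
      * write is a "MAJ:MIN value" pair resolved by blkg_conf_prep(); a value
      * of 0 clears the limit (stored internally as -1).  Illustrative usage
      * from userspace:
      *
      *	echo "8:16 1048576" > blkio.throttle.read_bps_device
      */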
1181static ssize_t tg_set_conf(struct kernfs_open_file *of,
1182                           char *buf, size_t nbytes, loff_t off, bool is_u64)
1183{
1184        struct blkcg *blkcg = css_to_blkcg(of_css(of));
1185        struct blkg_conf_ctx ctx;
1186        struct throtl_grp *tg;
1187        int ret;
1188        u64 v;
1189
1190        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1191        if (ret)
1192                return ret;
1193
1194        ret = -EINVAL;
1195        if (sscanf(ctx.body, "%llu", &v) != 1)
1196                goto out_finish;
1197        if (!v)
1198                v = -1;
1199
1200        tg = blkg_to_tg(ctx.blkg);
1201
1202        if (is_u64)
1203                *(u64 *)((void *)tg + of_cft(of)->private) = v;
1204        else
1205                *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1206
1207        tg_conf_updated(tg);
1208        ret = 0;
1209out_finish:
1210        blkg_conf_finish(&ctx);
1211        return ret ?: nbytes;
1212}
1213
1214static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1215                               char *buf, size_t nbytes, loff_t off)
1216{
1217        return tg_set_conf(of, buf, nbytes, off, true);
1218}
1219
1220static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1221                                char *buf, size_t nbytes, loff_t off)
1222{
1223        return tg_set_conf(of, buf, nbytes, off, false);
1224}
1225
1226static struct cftype throtl_legacy_files[] = {
1227        {
1228                .name = "throttle.read_bps_device",
1229                .private = offsetof(struct throtl_grp, bps[READ]),
1230                .seq_show = tg_print_conf_u64,
1231                .write = tg_set_conf_u64,
1232        },
1233        {
1234                .name = "throttle.write_bps_device",
1235                .private = offsetof(struct throtl_grp, bps[WRITE]),
1236                .seq_show = tg_print_conf_u64,
1237                .write = tg_set_conf_u64,
1238        },
1239        {
1240                .name = "throttle.read_iops_device",
1241                .private = offsetof(struct throtl_grp, iops[READ]),
1242                .seq_show = tg_print_conf_uint,
1243                .write = tg_set_conf_uint,
1244        },
1245        {
1246                .name = "throttle.write_iops_device",
1247                .private = offsetof(struct throtl_grp, iops[WRITE]),
1248                .seq_show = tg_print_conf_uint,
1249                .write = tg_set_conf_uint,
1250        },
1251        {
1252                .name = "throttle.io_service_bytes",
1253                .private = (unsigned long)&blkcg_policy_throtl,
1254                .seq_show = blkg_print_stat_bytes,
1255        },
1256        {
1257                .name = "throttle.io_serviced",
1258                .private = (unsigned long)&blkcg_policy_throtl,
1259                .seq_show = blkg_print_stat_ios,
1260        },
1261        { }     /* terminate */
1262};
1263
1264static u64 tg_prfill_max(struct seq_file *sf, struct blkg_policy_data *pd,
1265                         int off)
1266{
1267        struct throtl_grp *tg = pd_to_tg(pd);
1268        const char *dname = blkg_dev_name(pd->blkg);
1269        char bufs[4][21] = { "max", "max", "max", "max" };
1270
1271        if (!dname)
1272                return 0;
1273        if (tg->bps[READ] == -1 && tg->bps[WRITE] == -1 &&
1274            tg->iops[READ] == -1 && tg->iops[WRITE] == -1)
1275                return 0;
1276
1277        if (tg->bps[READ] != -1)
1278                snprintf(bufs[0], sizeof(bufs[0]), "%llu", tg->bps[READ]);
1279        if (tg->bps[WRITE] != -1)
1280                snprintf(bufs[1], sizeof(bufs[1]), "%llu", tg->bps[WRITE]);
1281        if (tg->iops[READ] != -1)
1282                snprintf(bufs[2], sizeof(bufs[2]), "%u", tg->iops[READ]);
1283        if (tg->iops[WRITE] != -1)
1284                snprintf(bufs[3], sizeof(bufs[3]), "%u", tg->iops[WRITE]);
1285
1286        seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s\n",
1287                   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
1288        return 0;
1289}
1290
1291static int tg_print_max(struct seq_file *sf, void *v)
1292{
1293        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_max,
1294                          &blkcg_policy_throtl, seq_cft(sf)->private, false);
1295        return 0;
1296}
1297
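     /*
      * Write handler for the "max" file on the default hierarchy (io.max).
      * The body prepared by blkg_conf_prep() is a whitespace-separated list
      * of key=value tokens; omitted keys keep their current setting and
      * "max" (internally -1) removes a limit.  Illustrative usage:
      *
      *	echo "8:16 rbps=2097152 wiops=max" > io.max
      */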
1298static ssize_t tg_set_max(struct kernfs_open_file *of,
1299                          char *buf, size_t nbytes, loff_t off)
1300{
1301        struct blkcg *blkcg = css_to_blkcg(of_css(of));
1302        struct blkg_conf_ctx ctx;
1303        struct throtl_grp *tg;
1304        u64 v[4];
1305        int ret;
1306
1307        ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1308        if (ret)
1309                return ret;
1310
1311        tg = blkg_to_tg(ctx.blkg);
1312
1313        v[0] = tg->bps[READ];
1314        v[1] = tg->bps[WRITE];
1315        v[2] = tg->iops[READ];
1316        v[3] = tg->iops[WRITE];
1317
1318        while (true) {
1319                char tok[27];   /* wiops=18446744073709551616 */
1320                char *p;
1321                u64 val = -1;
1322                int len;
1323
1324                if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1325                        break;
1326                if (tok[0] == '\0')
1327                        break;
1328                ctx.body += len;
1329
1330                ret = -EINVAL;
1331                p = tok;
1332                strsep(&p, "=");
1333                if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1334                        goto out_finish;
1335
1336                ret = -ERANGE;
1337                if (!val)
1338                        goto out_finish;
1339
1340                ret = -EINVAL;
1341                if (!strcmp(tok, "rbps"))
1342                        v[0] = val;
1343                else if (!strcmp(tok, "wbps"))
1344                        v[1] = val;
1345                else if (!strcmp(tok, "riops"))
1346                        v[2] = min_t(u64, val, UINT_MAX);
1347                else if (!strcmp(tok, "wiops"))
1348                        v[3] = min_t(u64, val, UINT_MAX);
1349                else
1350                        goto out_finish;
1351        }
1352
1353        tg->bps[READ] = v[0];
1354        tg->bps[WRITE] = v[1];
1355        tg->iops[READ] = v[2];
1356        tg->iops[WRITE] = v[3];
1357
1358        tg_conf_updated(tg);
1359        ret = 0;
1360out_finish:
1361        blkg_conf_finish(&ctx);
1362        return ret ?: nbytes;
1363}
1364
1365static struct cftype throtl_files[] = {
1366        {
1367                .name = "max",
1368                .flags = CFTYPE_NOT_ON_ROOT,
1369                .seq_show = tg_print_max,
1370                .write = tg_set_max,
1371        },
1372        { }     /* terminate */
1373};
1374
1375static void throtl_shutdown_wq(struct request_queue *q)
1376{
1377        struct throtl_data *td = q->td;
1378
1379        cancel_work_sync(&td->dispatch_work);
1380}
1381
1382static struct blkcg_policy blkcg_policy_throtl = {
1383        .dfl_cftypes            = throtl_files,
1384        .legacy_cftypes         = throtl_legacy_files,
1385
1386        .pd_alloc_fn            = throtl_pd_alloc,
1387        .pd_init_fn             = throtl_pd_init,
1388        .pd_online_fn           = throtl_pd_online,
1389        .pd_free_fn             = throtl_pd_free,
1390};
1391
1392bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
1393                    struct bio *bio)
1394{
1395        struct throtl_qnode *qn = NULL;
1396        struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
1397        struct throtl_service_queue *sq;
1398        bool rw = bio_data_dir(bio);
1399        bool throttled = false;
1400
1401        WARN_ON_ONCE(!rcu_read_lock_held());
1402
1403        /* see throtl_charge_bio() */
1404        if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
1405                goto out;
1406
1407        spin_lock_irq(q->queue_lock);
1408
1409        if (unlikely(blk_queue_bypass(q)))
1410                goto out_unlock;
1411
1412        sq = &tg->service_queue;
1413
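             /*
              * Walk up the throtl_grp hierarchy.  At each level the bio is
              * either charged and passed on towards the parent (still within
              * limits) or left to be queued on this tg below (over limits or
              * other bios already queued).  Reaching the top (tg == NULL)
              * means it can be issued directly.
              */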
1414        while (true) {
1415                /* throtl is FIFO - if bios are already queued, should queue */
1416                if (sq->nr_queued[rw])
1417                        break;
1418
1419                /* if above limits, break to queue */
1420                if (!tg_may_dispatch(tg, bio, NULL))
1421                        break;
1422
1423                /* within limits, let's charge and dispatch directly */
1424                throtl_charge_bio(tg, bio);
1425
1426                /*
1427                 * We need to trim slice even when bios are not being queued
1428                 * otherwise it might happen that a bio is not queued for
1429                 * a long time and slice keeps on extending and trim is not
1430                 * called for a long time. Now if limits are reduced suddenly
1431                 * we take into account all the IO dispatched so far at new
 1432                 * low rate and newly queued IO gets a really long dispatch
1433                 * time.
1434                 *
1435                 * So keep on trimming slice even if bio is not queued.
1436                 */
1437                throtl_trim_slice(tg, rw);
1438
1439                /*
1440                 * @bio passed through this layer without being throttled.
 1441                 * Climb up the ladder.  If we're already at the top, it
1442                 * can be executed directly.
1443                 */
1444                qn = &tg->qnode_on_parent[rw];
1445                sq = sq->parent_sq;
1446                tg = sq_to_tg(sq);
1447                if (!tg)
1448                        goto out_unlock;
1449        }
1450
1451        /* out-of-limit, queue to @tg */
1452        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1453                   rw == READ ? 'R' : 'W',
1454                   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
1455                   tg->io_disp[rw], tg->iops[rw],
1456                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
1457
1458        bio_associate_current(bio);
1459        tg->td->nr_queued[rw]++;
1460        throtl_add_bio_tg(bio, qn, tg);
1461        throttled = true;
1462
1463        /*
1464         * Update @tg's dispatch time and force schedule dispatch if @tg
1465         * was empty before @bio.  The forced scheduling isn't likely to
1466         * cause undue delay as @bio is likely to be dispatched directly if
1467         * its @tg's disptime is not in the future.
1468         */
1469        if (tg->flags & THROTL_TG_WAS_EMPTY) {
1470                tg_update_disptime(tg);
1471                throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
1472        }
1473
1474out_unlock:
1475        spin_unlock_irq(q->queue_lock);
1476out:
1477        /*
1478         * As multiple blk-throtls may stack in the same issue path, we
1479         * don't want bios to leave with the flag set.  Clear the flag if
1480         * being issued.
1481         */
1482        if (!throttled)
1483                bio->bi_opf &= ~REQ_THROTTLED;
1484        return throttled;
1485}
1486
1487/*
1488 * Dispatch all bios from all children tg's queued on @parent_sq.  On
1489 * return, @parent_sq is guaranteed to not have any active children tg's
1490 * and all bios from previously active tg's are on @parent_sq->bio_lists[].
1491 */
1492static void tg_drain_bios(struct throtl_service_queue *parent_sq)
1493{
1494        struct throtl_grp *tg;
1495
1496        while ((tg = throtl_rb_first(parent_sq))) {
1497                struct throtl_service_queue *sq = &tg->service_queue;
1498                struct bio *bio;
1499
1500                throtl_dequeue_tg(tg);
1501
1502                while ((bio = throtl_peek_queued(&sq->queued[READ])))
1503                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
1504                while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
1505                        tg_dispatch_one_bio(tg, bio_data_dir(bio));
1506        }
1507}
1508
1509/**
1510 * blk_throtl_drain - drain throttled bios
1511 * @q: request_queue to drain throttled bios for
1512 *
1513 * Dispatch all currently throttled bios on @q through ->make_request_fn().
1514 */
1515void blk_throtl_drain(struct request_queue *q)
1516        __releases(q->queue_lock) __acquires(q->queue_lock)
1517{
1518        struct throtl_data *td = q->td;
1519        struct blkcg_gq *blkg;
1520        struct cgroup_subsys_state *pos_css;
1521        struct bio *bio;
1522        int rw;
1523
1524        queue_lockdep_assert_held(q);
1525        rcu_read_lock();
1526
1527        /*
1528         * Drain each tg while doing post-order walk on the blkg tree, so
1529         * that all bios are propagated to td->service_queue.  It'd be
1530         * better to walk service_queue tree directly but blkg walk is
1531         * easier.
1532         */
1533        blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
1534                tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
1535
1536        /* finally, transfer bios from top-level tg's into the td */
1537        tg_drain_bios(&td->service_queue);
1538
1539        rcu_read_unlock();
1540        spin_unlock_irq(q->queue_lock);
1541
1542        /* all bios now should be in td->service_queue, issue them */
1543        for (rw = READ; rw <= WRITE; rw++)
1544                while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
1545                                                NULL)))
1546                        generic_make_request(bio);
1547
1548        spin_lock_irq(q->queue_lock);
1549}
1550
1551int blk_throtl_init(struct request_queue *q)
1552{
1553        struct throtl_data *td;
1554        int ret;
1555
1556        td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
1557        if (!td)
1558                return -ENOMEM;
1559
1560        INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
1561        throtl_service_queue_init(&td->service_queue);
1562
1563        q->td = td;
1564        td->queue = q;
1565
1566        /* activate policy */
1567        ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
1568        if (ret)
1569                kfree(td);
1570        return ret;
1571}
1572
1573void blk_throtl_exit(struct request_queue *q)
1574{
1575        BUG_ON(!q->td);
1576        throtl_shutdown_wq(q);
1577        blkcg_deactivate_policy(q, &blkcg_policy_throtl);
1578        kfree(q->td);
1579}
1580
1581static int __init throtl_init(void)
1582{
1583        kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
1584        if (!kthrotld_workqueue)
1585                panic("Failed to create kthrotld\n");
1586
1587        return blkcg_policy_register(&blkcg_policy_throtl);
1588}
1589
1590module_init(throtl_init);
1591