linux/block/blk-iolatency.c
/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt, with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling: we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bios as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller's: we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at the leaf
 * level.
 *
 * Consider the following hierarchy:
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are two ways we throttle IO:
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IOs we're allowed to have in flight.  This starts at UINT_MAX and
 * goes down to 1.  If the group is only ever submitting IO for itself then
 * this is the only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is
 * generating IO that has to be issued by the root cg to avoid priority
 * inversion (think REQ_META or REQ_SWAP).  If we are already at qd == 1 and
 * we're getting a lot of work done for us on behalf of the root cg and are
 * being asked to scale down more, then we induce a latency at userspace
 * return.  We accumulate the total amount of time we need to be punished by
 * doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time we will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued IO, whether that's some metadata intensive operation or the
 * group using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
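/*
 * Worked example of the induced delay accounting above (numbers are purely
 * illustrative): with min_lat_nsec = 5ms, a root-issued IO done on our behalf
 * that completes in 2ms adds 5ms - 2ms = 3ms to the accumulated delay.  When
 * the task heads back to userspace it sleeps for min(total_time,
 * NSEC_PER_SEC), i.e. at most one second per trip.
 */
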
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct blk_rq_stat __percpu *stats;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80)  - 80 samples
        2014, // exp(1/60)  - 60 samples
};

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

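/*
 * Callbacks handed to rq_qos_wait() below: iolat_cleanup_cb releases an
 * in-flight slot and wakes the next waiter, iolat_acquire_inflight only takes
 * a slot while we are under the group's current max_depth.
 */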
static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;
        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root.  If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

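/*
 * One scaling step: 1/16th of the device queue depth when scaling up
 * (qd >> SCALE_UP_FACTOR), 1/4 when scaling down (qd >> SCALE_DOWN_FACTOR),
 * and never less than 1.  Scaling down in bigger steps than we scale up lets
 * us back off quickly when we miss our target and creep back up carefully.
 */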
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours to
                 * dig out of it.  Just enough that we don't throttle/unthrottle
                 * with jagged workloads but can still unthrottle once pressure
                 * has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
 * depth at a time when scaling up and halve it when scaling down, so we don't
 * get wild swings and hopefully dial in to a fairer distribution of the
 * overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;
        bool changed = false;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        changed = true;
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else if (old > 1) {
                old >>= 1;
                changed = true;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did 5%
                 * or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = div64_u64(samples_thresh, 100);
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}

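/*
 * rq_qos throttle hook.  Walk from the bio's blkg up towards the root,
 * reacting to any scale cookie change and charging/blocking against each
 * level's queue depth, then make sure the per-queue timer is armed.
 */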
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}

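/*
 * Account one completed bio in the group's per-cpu latency stats, or, if it
 * was issued as root while the group is throttled, convert any latency
 * "saved" below min_lat_nsec into induced delay instead.
 */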
static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        struct blk_rq_stat *rq_stat;
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate "now" the same way the issue time was truncated so that
         * the two timestamps are comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bios in the cgroup's latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        rq_stat = get_cpu_ptr(iolat->stats);
        blk_rq_stat_add(rq_stat, req_time);
        put_cpu_ptr(rq_stat);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

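/*
 * Called at most once per sampling window: fold the window's per-cpu stats
 * into the running lat_avg, then compare the window's mean latency against
 * min_lat_nsec and ask the parent to scale the peer group up or down.
 */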
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct blk_rq_stat stat;
        unsigned long flags;
        int cpu, exp_idx;

        blk_rq_stat_init(&stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct blk_rq_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                blk_rq_stat_sum(&stat, s);
                blk_rq_stat_init(s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat.mean);

        /* Everything is ok and we don't need to adjust the scale. */
        if (stat.mean <= iolat->min_lat_nsec &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += stat.nr_samples;
        iolat->nr_samples = stat.nr_samples;

        if ((lat_info->last_scale_event >= now ||
            now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
            lat_info->scale_lat <= iolat->min_lat_nsec)
                goto out;

        if (stat.mean <= iolat->min_lat_nsec &&
            stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (stat.mean > iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

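/*
 * rq_qos done_bio hook.  Walk from the bio's blkg up towards the root,
 * releasing the in-flight slot taken at throttle time, recording the
 * completion latency and kicking off a latency check whenever a group's
 * sampling window has elapsed.
 */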
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now = ktime_to_ns(ktime_get());
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;

        blkg = bio->bi_blkg;
        if (!blkg)
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                atomic_dec(&rqw->inflight);
                if (!enabled || iolat->min_lat_nsec == 0)
                        goto next;
                iolatency_record_time(iolat, &bio->bi_issue, now,
                                      issue_as_root);
                window_start = atomic64_read(&iolat->window_start);
                if (now > window_start &&
                    (now - window_start) >= iolat->cur_win_nsec) {
                        if (atomic64_cmpxchg(&iolat->window_start,
                                        window_start, now) == window_start)
                                iolatency_check_latencies(iolat, now);
                }
next:
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}

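/*
 * rq_qos cleanup hook: give back the in-flight slots taken in the throttle
 * path for a bio that ends up not being issued.
 */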
static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;

        blkg = bio->bi_blkg;
        while (blkg && blkg->parent) {
                struct rq_wait *rqw;
                struct iolatency_grp *iolat;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                rqw = &iolat->rq_wait;
                atomic_dec(&rqw->inflight);
                wake_up(&rqw->wait);
next:
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .cleanup = blkcg_iolatency_cleanup,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};

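/*
 * Timer armed from the throttle path.  For every descendant that is currently
 * scaled down, scale back up if no scale_grp is on record, and clear a stale
 * scale_grp after 5 seconds so a group that has stopped doing IO doesn't keep
 * its peers throttled forever.
 */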
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and carry
                 * on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}

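/*
 * Register the io.latency rq_qos policy on a request queue and activate the
 * blkcg policy for it; everything is torn down again via
 * blkcg_iolatency_exit().
 */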
int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_CGROUP;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;

        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

        return 0;
}

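/*
 * Update a group's latency target.  The sampling window is sized to 16x the
 * target, clamped to [BLKIOLATENCY_MIN_WIN_SIZE, BLKIOLATENCY_MAX_WIN_SIZE],
 * and the per-queue enabled count tracks how many groups have a target set.
 */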
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                atomic_inc(&blkiolat->enabled);
        if (oldval && !val)
                atomic_dec(&blkiolat->enabled);
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}

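/*
 * Parse writes to the cgroup "io.latency" file.  After blkg_conf_prep() has
 * consumed the leading "MAJ:MIN", the body is a list of key=value tokens; the
 * only key understood here is "target", in microseconds, with "max" clearing
 * the target.  For example (device numbers purely illustrative):
 *
 *   echo "8:16 target=2000" > io.latency    # 2ms latency target
 *   echo "8:16 target=max" > io.latency     # remove the target
 */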
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                             size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blk_iolatency *blkiolat;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        blkiolat = iolat->blkiolat;
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Apply the new target; reset the parent's scaling state if it changed. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        iolatency_set_min_lat_nsec(blkg, lat_val);
        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }

        ret = 0;
out:
        blkg_conf_finish(&ctx);
        return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
                                size_t size)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);

        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
                                 avg_lat, cur_win);

        return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
                         iolat->rq_depth.max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
                                       __alignof__(struct blk_rq_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}

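/*
 * Initialize a freshly allocated per-blkg iolatency_grp: per-cpu stats, the
 * rq_wait, an unthrottled queue depth, and a scale cookie inherited from the
 * parent when the parent's pd has already been set up.
 */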
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        for_each_possible_cpu(cpu) {
                struct blk_rq_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                blk_rq_stat_init(stat);
        }

        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);

        iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes    = iolatency_files,
        .pd_alloc_fn    = iolatency_pd_alloc,
        .pd_init_fn     = iolatency_pd_init,
        .pd_offline_fn  = iolatency_pd_offline,
        .pd_free_fn     = iolatency_pd_free,
        .pd_stat_fn     = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        return blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);