linux/block/blk-cgroup.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Common Block IO controller cgroup interface
   4 *
   5 * Based on ideas and code from CFQ, CFS and BFQ:
   6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   7 *
   8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   9 *                    Paolo Valente <paolo.valente@unimore.it>
  10 *
  11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  12 *                    Nauman Rafique <nauman@google.com>
  13 *
  14 * For policy-specific per-blkcg data:
  15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  16 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  17 */
  18#include <linux/ioprio.h>
  19#include <linux/kdev_t.h>
  20#include <linux/module.h>
  21#include <linux/sched/signal.h>
  22#include <linux/err.h>
  23#include <linux/blkdev.h>
  24#include <linux/backing-dev.h>
  25#include <linux/slab.h>
  26#include <linux/genhd.h>
  27#include <linux/delay.h>
  28#include <linux/atomic.h>
  29#include <linux/ctype.h>
  30#include <linux/blk-cgroup.h>
  31#include <linux/tracehook.h>
  32#include <linux/psi.h>
  33#include "blk.h"
  34
  35#define MAX_KEY_LEN 100
  36
  37/*
  38 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  39 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  40 * policy [un]register operations including cgroup file additions /
  41 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  42 * allows grabbing it from cgroup callbacks.
  43 */
  44static DEFINE_MUTEX(blkcg_pol_register_mutex);
  45static DEFINE_MUTEX(blkcg_pol_mutex);
  46
  47struct blkcg blkcg_root;
  48EXPORT_SYMBOL_GPL(blkcg_root);
  49
  50struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  51EXPORT_SYMBOL_GPL(blkcg_root_css);
  52
  53static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  54
  55static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
  56
  57bool blkcg_debug_stats = false;
  58static struct workqueue_struct *blkcg_punt_bio_wq;
  59
  60static bool blkcg_policy_enabled(struct request_queue *q,
  61                                 const struct blkcg_policy *pol)
  62{
  63        return pol && test_bit(pol->plid, q->blkcg_pols);
  64}
  65
  66/**
  67 * blkg_free - free a blkg
  68 * @blkg: blkg to free
  69 *
  70 * Free @blkg which may be partially allocated.
  71 */
  72static void blkg_free(struct blkcg_gq *blkg)
  73{
  74        int i;
  75
  76        if (!blkg)
  77                return;
  78
  79        for (i = 0; i < BLKCG_MAX_POLS; i++)
  80                if (blkg->pd[i])
  81                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
  82
  83        blkg_rwstat_exit(&blkg->stat_ios);
  84        blkg_rwstat_exit(&blkg->stat_bytes);
  85        percpu_ref_exit(&blkg->refcnt);
  86        kfree(blkg);
  87}
  88
  89static void __blkg_release(struct rcu_head *rcu)
  90{
  91        struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
  92
  93        WARN_ON(!bio_list_empty(&blkg->async_bios));
  94
  95        /* release the blkcg and parent blkg refs this blkg has been holding */
  96        css_put(&blkg->blkcg->css);
  97        if (blkg->parent)
  98                blkg_put(blkg->parent);
  99
 100        wb_congested_put(blkg->wb_congested);
 101
 102        blkg_free(blkg);
 103}
 104
 105/*
 106 * A group is RCU protected, but having an rcu lock does not mean that one
 107 * can access all the fields of blkg and assume these are valid.  For
 108 * example, don't try to follow throtl_data and request queue links.
 109 *
  110 * Holding a reference to a blkg under RCU only allows access to values
  111 * local to the group, such as group stats and group rate limits.
 112 */
 113static void blkg_release(struct percpu_ref *ref)
 114{
 115        struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
 116
 117        call_rcu(&blkg->rcu_head, __blkg_release);
 118}
 119
 120static void blkg_async_bio_workfn(struct work_struct *work)
 121{
 122        struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
 123                                             async_bio_work);
 124        struct bio_list bios = BIO_EMPTY_LIST;
 125        struct bio *bio;
 126
 127        /* as long as there are pending bios, @blkg can't go away */
 128        spin_lock_bh(&blkg->async_bio_lock);
 129        bio_list_merge(&bios, &blkg->async_bios);
 130        bio_list_init(&blkg->async_bios);
 131        spin_unlock_bh(&blkg->async_bio_lock);
 132
 133        while ((bio = bio_list_pop(&bios)))
 134                submit_bio(bio);
 135}
 136
 137/**
 138 * blkg_alloc - allocate a blkg
 139 * @blkcg: block cgroup the new blkg is associated with
 140 * @q: request_queue the new blkg is associated with
 141 * @gfp_mask: allocation mask to use
 142 *
  143 * Allocate a new blkg associating @blkcg and @q.
 144 */
 145static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 146                                   gfp_t gfp_mask)
 147{
 148        struct blkcg_gq *blkg;
 149        int i;
 150
 151        /* alloc and init base part */
 152        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
 153        if (!blkg)
 154                return NULL;
 155
 156        if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
 157                goto err_free;
 158
 159        if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
 160            blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
 161                goto err_free;
 162
 163        blkg->q = q;
 164        INIT_LIST_HEAD(&blkg->q_node);
 165        spin_lock_init(&blkg->async_bio_lock);
 166        bio_list_init(&blkg->async_bios);
 167        INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
 168        blkg->blkcg = blkcg;
 169
 170        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 171                struct blkcg_policy *pol = blkcg_policy[i];
 172                struct blkg_policy_data *pd;
 173
 174                if (!blkcg_policy_enabled(q, pol))
 175                        continue;
 176
 177                /* alloc per-policy data and attach it to blkg */
 178                pd = pol->pd_alloc_fn(gfp_mask, q->node);
 179                if (!pd)
 180                        goto err_free;
 181
 182                blkg->pd[i] = pd;
 183                pd->blkg = blkg;
 184                pd->plid = i;
 185        }
 186
 187        return blkg;
 188
 189err_free:
 190        blkg_free(blkg);
 191        return NULL;
 192}
 193
 194struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 195                                      struct request_queue *q, bool update_hint)
 196{
 197        struct blkcg_gq *blkg;
 198
 199        /*
 200         * Hint didn't match.  Look up from the radix tree.  Note that the
 201         * hint can only be updated under queue_lock as otherwise @blkg
 202         * could have already been removed from blkg_tree.  The caller is
 203         * responsible for grabbing queue_lock if @update_hint.
 204         */
 205        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 206        if (blkg && blkg->q == q) {
 207                if (update_hint) {
 208                        lockdep_assert_held(&q->queue_lock);
 209                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
 210                }
 211                return blkg;
 212        }
 213
 214        return NULL;
 215}
 216EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 217
 218/*
 219 * If @new_blkg is %NULL, this function tries to allocate a new one as
 220 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 221 */
 222static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 223                                    struct request_queue *q,
 224                                    struct blkcg_gq *new_blkg)
 225{
 226        struct blkcg_gq *blkg;
 227        struct bdi_writeback_congested *wb_congested;
 228        int i, ret;
 229
 230        WARN_ON_ONCE(!rcu_read_lock_held());
 231        lockdep_assert_held(&q->queue_lock);
 232
 233        /* request_queue is dying, do not create/recreate a blkg */
 234        if (blk_queue_dying(q)) {
 235                ret = -ENODEV;
 236                goto err_free_blkg;
 237        }
 238
 239        /* blkg holds a reference to blkcg */
 240        if (!css_tryget_online(&blkcg->css)) {
 241                ret = -ENODEV;
 242                goto err_free_blkg;
 243        }
 244
 245        wb_congested = wb_congested_get_create(q->backing_dev_info,
 246                                               blkcg->css.id,
 247                                               GFP_NOWAIT | __GFP_NOWARN);
 248        if (!wb_congested) {
 249                ret = -ENOMEM;
 250                goto err_put_css;
 251        }
 252
 253        /* allocate */
 254        if (!new_blkg) {
 255                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
 256                if (unlikely(!new_blkg)) {
 257                        ret = -ENOMEM;
 258                        goto err_put_congested;
 259                }
 260        }
 261        blkg = new_blkg;
 262        blkg->wb_congested = wb_congested;
 263
 264        /* link parent */
 265        if (blkcg_parent(blkcg)) {
 266                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 267                if (WARN_ON_ONCE(!blkg->parent)) {
 268                        ret = -ENODEV;
 269                        goto err_put_congested;
 270                }
 271                blkg_get(blkg->parent);
 272        }
 273
 274        /* invoke per-policy init */
 275        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 276                struct blkcg_policy *pol = blkcg_policy[i];
 277
 278                if (blkg->pd[i] && pol->pd_init_fn)
 279                        pol->pd_init_fn(blkg->pd[i]);
 280        }
 281
 282        /* insert */
 283        spin_lock(&blkcg->lock);
 284        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 285        if (likely(!ret)) {
 286                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 287                list_add(&blkg->q_node, &q->blkg_list);
 288
 289                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 290                        struct blkcg_policy *pol = blkcg_policy[i];
 291
 292                        if (blkg->pd[i] && pol->pd_online_fn)
 293                                pol->pd_online_fn(blkg->pd[i]);
 294                }
 295        }
 296        blkg->online = true;
 297        spin_unlock(&blkcg->lock);
 298
 299        if (!ret)
 300                return blkg;
 301
  302        /* @blkg failed to be fully initialized, use the usual release path */
 303        blkg_put(blkg);
 304        return ERR_PTR(ret);
 305
 306err_put_congested:
 307        wb_congested_put(wb_congested);
 308err_put_css:
 309        css_put(&blkcg->css);
 310err_free_blkg:
 311        blkg_free(new_blkg);
 312        return ERR_PTR(ret);
 313}
 314
 315/**
 316 * __blkg_lookup_create - lookup blkg, try to create one if not there
 317 * @blkcg: blkcg of interest
 318 * @q: request_queue of interest
 319 *
 320 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 321 * create one.  blkg creation is performed recursively from blkcg_root such
 322 * that all non-root blkg's have access to the parent blkg.  This function
 323 * should be called under RCU read lock and @q->queue_lock.
 324 *
 325 * Returns the blkg or the closest blkg if blkg_create() fails as it walks
 326 * down from root.
 327 */
 328struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 329                                      struct request_queue *q)
 330{
 331        struct blkcg_gq *blkg;
 332
 333        WARN_ON_ONCE(!rcu_read_lock_held());
 334        lockdep_assert_held(&q->queue_lock);
 335
 336        blkg = __blkg_lookup(blkcg, q, true);
 337        if (blkg)
 338                return blkg;
 339
 340        /*
 341         * Create blkgs walking down from blkcg_root to @blkcg, so that all
 342         * non-root blkgs have access to their parents.  Returns the closest
 343         * blkg to the intended blkg should blkg_create() fail.
 344         */
 345        while (true) {
 346                struct blkcg *pos = blkcg;
 347                struct blkcg *parent = blkcg_parent(blkcg);
 348                struct blkcg_gq *ret_blkg = q->root_blkg;
 349
 350                while (parent) {
 351                        blkg = __blkg_lookup(parent, q, false);
 352                        if (blkg) {
 353                                /* remember closest blkg */
 354                                ret_blkg = blkg;
 355                                break;
 356                        }
 357                        pos = parent;
 358                        parent = blkcg_parent(parent);
 359                }
 360
 361                blkg = blkg_create(pos, q, NULL);
 362                if (IS_ERR(blkg))
 363                        return ret_blkg;
 364                if (pos == blkcg)
 365                        return blkg;
 366        }
 367}
 368
 369/**
 370 * blkg_lookup_create - find or create a blkg
 371 * @blkcg: target block cgroup
 372 * @q: target request_queue
 373 *
 374 * This looks up or creates the blkg representing the unique pair
 375 * of the blkcg and the request_queue.
 376 */
 377struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 378                                    struct request_queue *q)
 379{
 380        struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
 381
 382        if (unlikely(!blkg)) {
 383                unsigned long flags;
 384
 385                spin_lock_irqsave(&q->queue_lock, flags);
 386                blkg = __blkg_lookup_create(blkcg, q);
 387                spin_unlock_irqrestore(&q->queue_lock, flags);
 388        }
 389
 390        return blkg;
 391}
 392
 393static void blkg_destroy(struct blkcg_gq *blkg)
 394{
 395        struct blkcg *blkcg = blkg->blkcg;
 396        struct blkcg_gq *parent = blkg->parent;
 397        int i;
 398
 399        lockdep_assert_held(&blkg->q->queue_lock);
 400        lockdep_assert_held(&blkcg->lock);
 401
  402        /* Something is wrong if we are trying to remove the same group twice */
 403        WARN_ON_ONCE(list_empty(&blkg->q_node));
 404        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 405
 406        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 407                struct blkcg_policy *pol = blkcg_policy[i];
 408
 409                if (blkg->pd[i] && pol->pd_offline_fn)
 410                        pol->pd_offline_fn(blkg->pd[i]);
 411        }
 412
 413        if (parent) {
 414                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 415                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
 416        }
 417
 418        blkg->online = false;
 419
 420        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 421        list_del_init(&blkg->q_node);
 422        hlist_del_init_rcu(&blkg->blkcg_node);
 423
 424        /*
 425         * Both setting lookup hint to and clearing it from @blkg are done
 426         * under queue_lock.  If it's not pointing to @blkg now, it never
 427         * will.  Hint assignment itself can race safely.
 428         */
 429        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 430                rcu_assign_pointer(blkcg->blkg_hint, NULL);
 431
 432        /*
 433         * Put the reference taken at the time of creation so that when all
 434         * queues are gone, group can be destroyed.
 435         */
 436        percpu_ref_kill(&blkg->refcnt);
 437}
 438
 439/**
 440 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 441 * @q: request_queue of interest
 442 *
 443 * Destroy all blkgs associated with @q.
 444 */
 445static void blkg_destroy_all(struct request_queue *q)
 446{
 447        struct blkcg_gq *blkg, *n;
 448
 449        spin_lock_irq(&q->queue_lock);
 450        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 451                struct blkcg *blkcg = blkg->blkcg;
 452
 453                spin_lock(&blkcg->lock);
 454                blkg_destroy(blkg);
 455                spin_unlock(&blkcg->lock);
 456        }
 457
 458        q->root_blkg = NULL;
 459        spin_unlock_irq(&q->queue_lock);
 460}
 461
 462static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 463                             struct cftype *cftype, u64 val)
 464{
 465        struct blkcg *blkcg = css_to_blkcg(css);
 466        struct blkcg_gq *blkg;
 467        int i;
 468
 469        mutex_lock(&blkcg_pol_mutex);
 470        spin_lock_irq(&blkcg->lock);
 471
 472        /*
 473         * Note that stat reset is racy - it doesn't synchronize against
 474         * stat updates.  This is a debug feature which shouldn't exist
 475         * anyway.  If you get hit by a race, retry.
 476         */
 477        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 478                blkg_rwstat_reset(&blkg->stat_bytes);
 479                blkg_rwstat_reset(&blkg->stat_ios);
 480
 481                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 482                        struct blkcg_policy *pol = blkcg_policy[i];
 483
 484                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
 485                                pol->pd_reset_stats_fn(blkg->pd[i]);
 486                }
 487        }
 488
 489        spin_unlock_irq(&blkcg->lock);
 490        mutex_unlock(&blkcg_pol_mutex);
 491        return 0;
 492}
 493
 494const char *blkg_dev_name(struct blkcg_gq *blkg)
 495{
 496        /* some drivers (floppy) instantiate a queue w/o disk registered */
 497        if (blkg->q->backing_dev_info->dev)
 498                return dev_name(blkg->q->backing_dev_info->dev);
 499        return NULL;
 500}
 501
 502/**
 503 * blkcg_print_blkgs - helper for printing per-blkg data
 504 * @sf: seq_file to print to
 505 * @blkcg: blkcg of interest
 506 * @prfill: fill function to print out a blkg
 507 * @pol: policy in question
 508 * @data: data to be passed to @prfill
 509 * @show_total: to print out sum of prfill return values or not
 510 *
 511 * This function invokes @prfill on each blkg of @blkcg if pd for the
 512 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 513 * policy data and @data and the matching queue lock held.  If @show_total
 514 * is %true, the sum of the return values from @prfill is printed with
 515 * "Total" label at the end.
 516 *
  517 * This is to be used to construct print functions for the
  518 * cftype->seq_show() method.
 519 */
 520void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 521                       u64 (*prfill)(struct seq_file *,
 522                                     struct blkg_policy_data *, int),
 523                       const struct blkcg_policy *pol, int data,
 524                       bool show_total)
 525{
 526        struct blkcg_gq *blkg;
 527        u64 total = 0;
 528
 529        rcu_read_lock();
 530        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 531                spin_lock_irq(&blkg->q->queue_lock);
 532                if (blkcg_policy_enabled(blkg->q, pol))
 533                        total += prfill(sf, blkg->pd[pol->plid], data);
 534                spin_unlock_irq(&blkg->q->queue_lock);
 535        }
 536        rcu_read_unlock();
 537
 538        if (show_total)
 539                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 540}
 541EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 542
 543/**
 544 * __blkg_prfill_u64 - prfill helper for a single u64 value
 545 * @sf: seq_file to print to
 546 * @pd: policy private data of interest
 547 * @v: value to print
 548 *
  549 * Print @v to @sf for the device associated with @pd.
 550 */
 551u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 552{
 553        const char *dname = blkg_dev_name(pd->blkg);
 554
 555        if (!dname)
 556                return 0;
 557
 558        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 559        return v;
 560}
 561EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
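
/*
 * Illustrative sketch (not part of the original file): a policy would
 * typically pair blkcg_print_blkgs() with a prfill callback such as the one
 * below to emit one u64 per device.  "my_policy" and the cftype wiring are
 * hypothetical names used only for this example.
 *
 *	static u64 my_prfill_u64(struct seq_file *sf,
 *				 struct blkg_policy_data *pd, int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd, *(u64 *)((void *)pd + off));
 *	}
 *
 *	static int my_print_u64(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), my_prfill_u64,
 *				  &my_policy, seq_cft(sf)->private, false);
 *		return 0;
 *	}
 *
 * my_print_u64() would then be wired up as a cftype->seq_show callback with
 * cftype->private set to the offset of the field inside the policy data.
 */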
 562
 563/**
 564 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 565 * @sf: seq_file to print to
 566 * @pd: policy private data of interest
 567 * @rwstat: rwstat to print
 568 *
  569 * Print @rwstat to @sf for the device associated with @pd.
 570 */
 571u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 572                         const struct blkg_rwstat_sample *rwstat)
 573{
 574        static const char *rwstr[] = {
 575                [BLKG_RWSTAT_READ]      = "Read",
 576                [BLKG_RWSTAT_WRITE]     = "Write",
 577                [BLKG_RWSTAT_SYNC]      = "Sync",
 578                [BLKG_RWSTAT_ASYNC]     = "Async",
 579                [BLKG_RWSTAT_DISCARD]   = "Discard",
 580        };
 581        const char *dname = blkg_dev_name(pd->blkg);
 582        u64 v;
 583        int i;
 584
 585        if (!dname)
 586                return 0;
 587
 588        for (i = 0; i < BLKG_RWSTAT_NR; i++)
 589                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 590                           rwstat->cnt[i]);
 591
 592        v = rwstat->cnt[BLKG_RWSTAT_READ] +
 593                rwstat->cnt[BLKG_RWSTAT_WRITE] +
 594                rwstat->cnt[BLKG_RWSTAT_DISCARD];
 595        seq_printf(sf, "%s Total %llu\n", dname, v);
 596        return v;
 597}
 598EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 599
 600/**
 601 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 602 * @sf: seq_file to print to
 603 * @pd: policy private data of interest
 604 * @off: offset to the blkg_rwstat in @pd
 605 *
 606 * prfill callback for printing a blkg_rwstat.
 607 */
 608u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 609                       int off)
 610{
 611        struct blkg_rwstat_sample rwstat = { };
 612
 613        blkg_rwstat_read((void *)pd + off, &rwstat);
 614        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 615}
 616EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 617
 618static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
 619                                    struct blkg_policy_data *pd, int off)
 620{
 621        struct blkg_rwstat_sample rwstat = { };
 622
 623        blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
 624        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 625}
 626
 627/**
 628 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 629 * @sf: seq_file to print to
 630 * @v: unused
 631 *
 632 * To be used as cftype->seq_show to print blkg->stat_bytes.
 633 * cftype->private must be set to the blkcg_policy.
 634 */
 635int blkg_print_stat_bytes(struct seq_file *sf, void *v)
 636{
 637        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 638                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 639                          offsetof(struct blkcg_gq, stat_bytes), true);
 640        return 0;
 641}
 642EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
 643
 644/**
  645 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 646 * @sf: seq_file to print to
 647 * @v: unused
 648 *
 649 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 650 * must be set to the blkcg_policy.
 651 */
 652int blkg_print_stat_ios(struct seq_file *sf, void *v)
 653{
 654        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 655                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 656                          offsetof(struct blkcg_gq, stat_ios), true);
 657        return 0;
 658}
 659EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
 660
 661static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
 662                                              struct blkg_policy_data *pd,
 663                                              int off)
 664{
 665        struct blkg_rwstat_sample rwstat;
 666
 667        blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
 668        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 669}
 670
 671/**
 672 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 673 * @sf: seq_file to print to
 674 * @v: unused
 675 */
 676int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
 677{
 678        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 679                          blkg_prfill_rwstat_field_recursive,
 680                          (void *)seq_cft(sf)->private,
 681                          offsetof(struct blkcg_gq, stat_bytes), true);
 682        return 0;
 683}
 684EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
 685
 686/**
 687 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 688 * @sf: seq_file to print to
 689 * @v: unused
 690 */
 691int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
 692{
 693        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 694                          blkg_prfill_rwstat_field_recursive,
 695                          (void *)seq_cft(sf)->private,
 696                          offsetof(struct blkcg_gq, stat_ios), true);
 697        return 0;
 698}
 699EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
 700
 701/**
 702 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 703 * @blkg: blkg of interest
 704 * @pol: blkcg_policy which contains the blkg_rwstat
 705 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 706 * @sum: blkg_rwstat_sample structure containing the results
 707 *
 708 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 709 * online descendants and their aux counts.  The caller must be holding the
 710 * queue lock for online tests.
 711 *
 712 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 713 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 714 */
 715void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 716                int off, struct blkg_rwstat_sample *sum)
 717{
 718        struct blkcg_gq *pos_blkg;
 719        struct cgroup_subsys_state *pos_css;
 720        unsigned int i;
 721
 722        lockdep_assert_held(&blkg->q->queue_lock);
 723
 724        rcu_read_lock();
 725        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 726                struct blkg_rwstat *rwstat;
 727
 728                if (!pos_blkg->online)
 729                        continue;
 730
 731                if (pol)
 732                        rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 733                else
 734                        rwstat = (void *)pos_blkg + off;
 735
 736                for (i = 0; i < BLKG_RWSTAT_NR; i++)
 737                        sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
 738        }
 739        rcu_read_unlock();
 740}
 741EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
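
/*
 * Illustrative sketch (not part of the original file): a policy that embeds a
 * blkg_rwstat in its own blkg_policy_data could print the hierarchical sum by
 * passing itself as @pol, roughly as below ("my_policy" is hypothetical).
 * This is the @pol != NULL counterpart of blkg_prfill_rwstat_field_recursive()
 * above, which sums counters embedded in the blkg itself.
 *
 *	static u64 my_prfill_rwstat_recursive(struct seq_file *sf,
 *					      struct blkg_policy_data *pd,
 *					      int off)
 *	{
 *		struct blkg_rwstat_sample sum;
 *
 *		blkg_rwstat_recursive_sum(pd_to_blkg(pd), &my_policy, off, &sum);
 *		return __blkg_prfill_rwstat(sf, pd, &sum);
 *	}
 */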
 742
  743/* Performs the policy-enabled check, then looks up the blkg. */
 744static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 745                                          const struct blkcg_policy *pol,
 746                                          struct request_queue *q)
 747{
 748        WARN_ON_ONCE(!rcu_read_lock_held());
 749        lockdep_assert_held(&q->queue_lock);
 750
 751        if (!blkcg_policy_enabled(q, pol))
 752                return ERR_PTR(-EOPNOTSUPP);
 753        return __blkg_lookup(blkcg, q, true /* update_hint */);
 754}
 755
 756/**
 757 * blkg_conf_prep - parse and prepare for per-blkg config update
 758 * @blkcg: target block cgroup
 759 * @pol: target policy
 760 * @input: input string
 761 * @ctx: blkg_conf_ctx to be filled
 762 *
 763 * Parse per-blkg config update from @input and initialize @ctx with the
 764 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 765 * part of @input following MAJ:MIN.  This function returns with RCU read
 766 * lock and queue lock held and must be paired with blkg_conf_finish().
 767 */
 768int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 769                   char *input, struct blkg_conf_ctx *ctx)
 770        __acquires(rcu) __acquires(&disk->queue->queue_lock)
 771{
 772        struct gendisk *disk;
 773        struct request_queue *q;
 774        struct blkcg_gq *blkg;
 775        unsigned int major, minor;
 776        int key_len, part, ret;
 777        char *body;
 778
 779        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 780                return -EINVAL;
 781
 782        body = input + key_len;
 783        if (!isspace(*body))
 784                return -EINVAL;
 785        body = skip_spaces(body);
 786
 787        disk = get_gendisk(MKDEV(major, minor), &part);
 788        if (!disk)
 789                return -ENODEV;
 790        if (part) {
 791                ret = -ENODEV;
 792                goto fail;
 793        }
 794
 795        q = disk->queue;
 796
 797        rcu_read_lock();
 798        spin_lock_irq(&q->queue_lock);
 799
 800        blkg = blkg_lookup_check(blkcg, pol, q);
 801        if (IS_ERR(blkg)) {
 802                ret = PTR_ERR(blkg);
 803                goto fail_unlock;
 804        }
 805
 806        if (blkg)
 807                goto success;
 808
 809        /*
 810         * Create blkgs walking down from blkcg_root to @blkcg, so that all
 811         * non-root blkgs have access to their parents.
 812         */
 813        while (true) {
 814                struct blkcg *pos = blkcg;
 815                struct blkcg *parent;
 816                struct blkcg_gq *new_blkg;
 817
 818                parent = blkcg_parent(blkcg);
 819                while (parent && !__blkg_lookup(parent, q, false)) {
 820                        pos = parent;
 821                        parent = blkcg_parent(parent);
 822                }
 823
 824                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
 825                spin_unlock_irq(&q->queue_lock);
 826                rcu_read_unlock();
 827
 828                new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
 829                if (unlikely(!new_blkg)) {
 830                        ret = -ENOMEM;
 831                        goto fail;
 832                }
 833
 834                rcu_read_lock();
 835                spin_lock_irq(&q->queue_lock);
 836
 837                blkg = blkg_lookup_check(pos, pol, q);
 838                if (IS_ERR(blkg)) {
 839                        ret = PTR_ERR(blkg);
 840                        goto fail_unlock;
 841                }
 842
 843                if (blkg) {
 844                        blkg_free(new_blkg);
 845                } else {
 846                        blkg = blkg_create(pos, q, new_blkg);
 847                        if (IS_ERR(blkg)) {
 848                                ret = PTR_ERR(blkg);
 849                                goto fail_unlock;
 850                        }
 851                }
 852
 853                if (pos == blkcg)
 854                        goto success;
 855        }
 856success:
 857        ctx->disk = disk;
 858        ctx->blkg = blkg;
 859        ctx->body = body;
 860        return 0;
 861
 862fail_unlock:
 863        spin_unlock_irq(&q->queue_lock);
 864        rcu_read_unlock();
 865fail:
 866        put_disk_and_module(disk);
 867        /*
  868         * If the queue was bypassing, we should retry.  Do so after a
  869         * short msleep().  It isn't strictly necessary but the queue
  870         * can be bypassing for some time and it's always nice to
 871         * avoid busy looping.
 872         */
 873        if (ret == -EBUSY) {
 874                msleep(10);
 875                ret = restart_syscall();
 876        }
 877        return ret;
 878}
 879
 880/**
 881 * blkg_conf_finish - finish up per-blkg config update
  882 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 883 *
 884 * Finish up after per-blkg config update.  This function must be paired
 885 * with blkg_conf_prep().
 886 */
 887void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 888        __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 889{
 890        spin_unlock_irq(&ctx->disk->queue->queue_lock);
 891        rcu_read_unlock();
 892        put_disk_and_module(ctx->disk);
 893}
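
/*
 * Illustrative sketch (not part of the original file): a policy's cftype
 * write handler typically brackets its config update with the prep/finish
 * pair above.  "my_policy" and the limit handling are hypothetical.
 *
 *	static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
 *				    size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &v) == 1) {
 *			(apply @v to ctx.blkg's policy data here)
 *			ret = 0;
 *		}
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */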
 894
 895static int blkcg_print_stat(struct seq_file *sf, void *v)
 896{
 897        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 898        struct blkcg_gq *blkg;
 899
 900        rcu_read_lock();
 901
 902        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 903                const char *dname;
 904                char *buf;
 905                struct blkg_rwstat_sample rwstat;
 906                u64 rbytes, wbytes, rios, wios, dbytes, dios;
 907                size_t size = seq_get_buf(sf, &buf), off = 0;
 908                int i;
 909                bool has_stats = false;
 910
 911                dname = blkg_dev_name(blkg);
 912                if (!dname)
 913                        continue;
 914
 915                /*
  916                 * scnprintf() returns the number of characters written, NOT
  917                 * including the trailing '\0'.  Each subsequent chunk must
  918                 * start at that '\0', so only the returned count is added
  919                 * to the running offset.
 920                 */
 921                off += scnprintf(buf+off, size-off, "%s ", dname);
 922
 923                spin_lock_irq(&blkg->q->queue_lock);
 924
 925                blkg_rwstat_recursive_sum(blkg, NULL,
 926                                offsetof(struct blkcg_gq, stat_bytes), &rwstat);
 927                rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
 928                wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
 929                dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 930
 931                blkg_rwstat_recursive_sum(blkg, NULL,
 932                                        offsetof(struct blkcg_gq, stat_ios), &rwstat);
 933                rios = rwstat.cnt[BLKG_RWSTAT_READ];
 934                wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
 935                dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
 936
 937                spin_unlock_irq(&blkg->q->queue_lock);
 938
 939                if (rbytes || wbytes || rios || wios) {
 940                        has_stats = true;
 941                        off += scnprintf(buf+off, size-off,
 942                                         "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
 943                                         rbytes, wbytes, rios, wios,
 944                                         dbytes, dios);
 945                }
 946
 947                if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
 948                        has_stats = true;
 949                        off += scnprintf(buf+off, size-off,
 950                                         " use_delay=%d delay_nsec=%llu",
 951                                         atomic_read(&blkg->use_delay),
 952                                        (unsigned long long)atomic64_read(&blkg->delay_nsec));
 953                }
 954
 955                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 956                        struct blkcg_policy *pol = blkcg_policy[i];
 957                        size_t written;
 958
 959                        if (!blkg->pd[i] || !pol->pd_stat_fn)
 960                                continue;
 961
 962                        written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
 963                        if (written)
 964                                has_stats = true;
 965                        off += written;
 966                }
 967
 968                if (has_stats) {
 969                        if (off < size - 1) {
 970                                off += scnprintf(buf+off, size-off, "\n");
 971                                seq_commit(sf, off);
 972                        } else {
 973                                seq_commit(sf, -1);
 974                        }
 975                }
 976        }
 977
 978        rcu_read_unlock();
 979        return 0;
 980}
 981
 982static struct cftype blkcg_files[] = {
 983        {
 984                .name = "stat",
 985                .flags = CFTYPE_NOT_ON_ROOT,
 986                .seq_show = blkcg_print_stat,
 987        },
 988        { }     /* terminate */
 989};
 990
 991static struct cftype blkcg_legacy_files[] = {
 992        {
 993                .name = "reset_stats",
 994                .write_u64 = blkcg_reset_stats,
 995        },
 996        { }     /* terminate */
 997};
 998
 999/*
1000 * blkcg destruction is a three-stage process.
1001 *
1002 * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
1003 *    which offlines writeback.  Here we tie the next stage of blkg destruction
1004 *    to the completion of writeback associated with the blkcg.  This lets us
1005 *    avoid punting potentially large amounts of outstanding writeback to root
1006 *    while maintaining any ongoing policies.  The next stage is triggered when
1007 *    the nr_cgwbs count goes to zero.
1008 *
1009 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
1010 *    and handles the destruction of blkgs.  Here the css reference held by
1011 *    the blkg is put back eventually allowing blkcg_css_free() to be called.
1012 *    This work may occur in cgwb_release_workfn() on the cgwb_release
1013 *    workqueue.  Any submitted ios that fail to get the blkg ref will be
1014 *    punted to the root_blkg.
1015 *
1016 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
1017 *    This finally frees the blkcg.
1018 */
1019
1020/**
1021 * blkcg_css_offline - cgroup css_offline callback
1022 * @css: css of interest
1023 *
1024 * This function is called when @css is about to go away.  Here the cgwbs are
1025 * offlined first and only once writeback associated with the blkcg has
1026 * finished do we start step 2 (see above).
1027 */
1028static void blkcg_css_offline(struct cgroup_subsys_state *css)
1029{
1030        struct blkcg *blkcg = css_to_blkcg(css);
1031
1032        /* this prevents anyone from attaching or migrating to this blkcg */
1033        wb_blkcg_offline(blkcg);
1034
1035        /* put the base cgwb reference allowing step 2 to be triggered */
1036        blkcg_cgwb_put(blkcg);
1037}
1038
1039/**
1040 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1041 * @blkcg: blkcg of interest
1042 *
1043 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
1044 * is nested inside q lock, this function performs reverse double lock dancing.
1045 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1046 * blkcg_css_free to eventually be called.
1047 *
1048 * This is the blkcg counterpart of ioc_release_fn().
1049 */
1050void blkcg_destroy_blkgs(struct blkcg *blkcg)
1051{
1052        spin_lock_irq(&blkcg->lock);
1053
1054        while (!hlist_empty(&blkcg->blkg_list)) {
1055                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1056                                                struct blkcg_gq, blkcg_node);
1057                struct request_queue *q = blkg->q;
1058
1059                if (spin_trylock(&q->queue_lock)) {
1060                        blkg_destroy(blkg);
1061                        spin_unlock(&q->queue_lock);
1062                } else {
1063                        spin_unlock_irq(&blkcg->lock);
1064                        cpu_relax();
1065                        spin_lock_irq(&blkcg->lock);
1066                }
1067        }
1068
1069        spin_unlock_irq(&blkcg->lock);
1070}
1071
1072static void blkcg_css_free(struct cgroup_subsys_state *css)
1073{
1074        struct blkcg *blkcg = css_to_blkcg(css);
1075        int i;
1076
1077        mutex_lock(&blkcg_pol_mutex);
1078
1079        list_del(&blkcg->all_blkcgs_node);
1080
1081        for (i = 0; i < BLKCG_MAX_POLS; i++)
1082                if (blkcg->cpd[i])
1083                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1084
1085        mutex_unlock(&blkcg_pol_mutex);
1086
1087        kfree(blkcg);
1088}
1089
1090static struct cgroup_subsys_state *
1091blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1092{
1093        struct blkcg *blkcg;
1094        struct cgroup_subsys_state *ret;
1095        int i;
1096
1097        mutex_lock(&blkcg_pol_mutex);
1098
1099        if (!parent_css) {
1100                blkcg = &blkcg_root;
1101        } else {
1102                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1103                if (!blkcg) {
1104                        ret = ERR_PTR(-ENOMEM);
1105                        goto unlock;
1106                }
1107        }
1108
1109        for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1110                struct blkcg_policy *pol = blkcg_policy[i];
1111                struct blkcg_policy_data *cpd;
1112
1113                /*
 1114                 * Skip policies that haven't been registered yet; their
 1115                 * per-cgroup data is allocated when they register.  For
 1116                 * registered policies, check if any specific per-cgroup
 1117                 * data is required: if it is, allocate and initialize it.
1118                 */
1119                if (!pol || !pol->cpd_alloc_fn)
1120                        continue;
1121
1122                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1123                if (!cpd) {
1124                        ret = ERR_PTR(-ENOMEM);
1125                        goto free_pd_blkcg;
1126                }
1127                blkcg->cpd[i] = cpd;
1128                cpd->blkcg = blkcg;
1129                cpd->plid = i;
1130                if (pol->cpd_init_fn)
1131                        pol->cpd_init_fn(cpd);
1132        }
1133
1134        spin_lock_init(&blkcg->lock);
1135        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1136        INIT_HLIST_HEAD(&blkcg->blkg_list);
1137#ifdef CONFIG_CGROUP_WRITEBACK
1138        INIT_LIST_HEAD(&blkcg->cgwb_list);
1139        refcount_set(&blkcg->cgwb_refcnt, 1);
1140#endif
1141        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1142
1143        mutex_unlock(&blkcg_pol_mutex);
1144        return &blkcg->css;
1145
1146free_pd_blkcg:
1147        for (i--; i >= 0; i--)
1148                if (blkcg->cpd[i])
1149                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1150
1151        if (blkcg != &blkcg_root)
1152                kfree(blkcg);
1153unlock:
1154        mutex_unlock(&blkcg_pol_mutex);
1155        return ret;
1156}
1157
1158/**
1159 * blkcg_init_queue - initialize blkcg part of request queue
1160 * @q: request_queue to initialize
1161 *
1162 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1163 * part of new request_queue @q.
1164 *
1165 * RETURNS:
1166 * 0 on success, -errno on failure.
1167 */
1168int blkcg_init_queue(struct request_queue *q)
1169{
1170        struct blkcg_gq *new_blkg, *blkg;
1171        bool preloaded;
1172        int ret;
1173
1174        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1175        if (!new_blkg)
1176                return -ENOMEM;
1177
1178        preloaded = !radix_tree_preload(GFP_KERNEL);
1179
1180        /* Make sure the root blkg exists. */
1181        rcu_read_lock();
1182        spin_lock_irq(&q->queue_lock);
1183        blkg = blkg_create(&blkcg_root, q, new_blkg);
1184        if (IS_ERR(blkg))
1185                goto err_unlock;
1186        q->root_blkg = blkg;
1187        spin_unlock_irq(&q->queue_lock);
1188        rcu_read_unlock();
1189
1190        if (preloaded)
1191                radix_tree_preload_end();
1192
1193        ret = blk_iolatency_init(q);
1194        if (ret)
1195                goto err_destroy_all;
1196
1197        ret = blk_throtl_init(q);
1198        if (ret)
1199                goto err_destroy_all;
1200        return 0;
1201
1202err_destroy_all:
1203        blkg_destroy_all(q);
1204        return ret;
1205err_unlock:
1206        spin_unlock_irq(&q->queue_lock);
1207        rcu_read_unlock();
1208        if (preloaded)
1209                radix_tree_preload_end();
1210        return PTR_ERR(blkg);
1211}
1212
1213/**
1214 * blkcg_drain_queue - drain blkcg part of request_queue
1215 * @q: request_queue to drain
1216 *
1217 * Called from blk_drain_queue().  Responsible for draining blkcg part.
1218 */
1219void blkcg_drain_queue(struct request_queue *q)
1220{
1221        lockdep_assert_held(&q->queue_lock);
1222
1223        /*
1224         * @q could be exiting and already have destroyed all blkgs as
1225         * indicated by NULL root_blkg.  If so, don't confuse policies.
1226         */
1227        if (!q->root_blkg)
1228                return;
1229
1230        blk_throtl_drain(q);
1231}
1232
1233/**
1234 * blkcg_exit_queue - exit and release blkcg part of request_queue
1235 * @q: request_queue being released
1236 *
1237 * Called from blk_exit_queue().  Responsible for exiting blkcg part.
1238 */
1239void blkcg_exit_queue(struct request_queue *q)
1240{
1241        blkg_destroy_all(q);
1242        blk_throtl_exit(q);
1243}
1244
1245/*
 1246 * We cannot support shared io contexts, as we have no means to support
1247 * two tasks with the same ioc in two different groups without major rework
1248 * of the main cic data structures.  For now we allow a task to change
1249 * its cgroup only if it's the only owner of its ioc.
1250 */
1251static int blkcg_can_attach(struct cgroup_taskset *tset)
1252{
1253        struct task_struct *task;
1254        struct cgroup_subsys_state *dst_css;
1255        struct io_context *ioc;
1256        int ret = 0;
1257
1258        /* task_lock() is needed to avoid races with exit_io_context() */
1259        cgroup_taskset_for_each(task, dst_css, tset) {
1260                task_lock(task);
1261                ioc = task->io_context;
1262                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1263                        ret = -EINVAL;
1264                task_unlock(task);
1265                if (ret)
1266                        break;
1267        }
1268        return ret;
1269}
1270
1271static void blkcg_bind(struct cgroup_subsys_state *root_css)
1272{
1273        int i;
1274
1275        mutex_lock(&blkcg_pol_mutex);
1276
1277        for (i = 0; i < BLKCG_MAX_POLS; i++) {
1278                struct blkcg_policy *pol = blkcg_policy[i];
1279                struct blkcg *blkcg;
1280
1281                if (!pol || !pol->cpd_bind_fn)
1282                        continue;
1283
1284                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1285                        if (blkcg->cpd[pol->plid])
1286                                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1287        }
1288        mutex_unlock(&blkcg_pol_mutex);
1289}
1290
1291static void blkcg_exit(struct task_struct *tsk)
1292{
1293        if (tsk->throttle_queue)
1294                blk_put_queue(tsk->throttle_queue);
1295        tsk->throttle_queue = NULL;
1296}
1297
1298struct cgroup_subsys io_cgrp_subsys = {
1299        .css_alloc = blkcg_css_alloc,
1300        .css_offline = blkcg_css_offline,
1301        .css_free = blkcg_css_free,
1302        .can_attach = blkcg_can_attach,
1303        .bind = blkcg_bind,
1304        .dfl_cftypes = blkcg_files,
1305        .legacy_cftypes = blkcg_legacy_files,
1306        .legacy_name = "blkio",
1307        .exit = blkcg_exit,
1308#ifdef CONFIG_MEMCG
1309        /*
1310         * This ensures that, if available, memcg is automatically enabled
1311         * together on the default hierarchy so that the owner cgroup can
1312         * be retrieved from writeback pages.
1313         */
1314        .depends_on = 1 << memory_cgrp_id,
1315#endif
1316};
1317EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1318
1319/**
1320 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1321 * @q: request_queue of interest
1322 * @pol: blkcg policy to activate
1323 *
 1324 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q is frozen while
 1325 * its blkgs are populated with policy_data for @pol.
 1326 *
 1327 * Activation happens with @q frozen, so nobody would be accessing blkgs
1328 * from IO path.  Update of each blkg is protected by both queue and blkcg
1329 * locks so that holding either lock and testing blkcg_policy_enabled() is
1330 * always enough for dereferencing policy data.
1331 *
1332 * The caller is responsible for synchronizing [de]activations and policy
1333 * [un]registerations.  Returns 0 on success, -errno on failure.
1334 */
1335int blkcg_activate_policy(struct request_queue *q,
1336                          const struct blkcg_policy *pol)
1337{
1338        struct blkg_policy_data *pd_prealloc = NULL;
1339        struct blkcg_gq *blkg;
1340        int ret;
1341
1342        if (blkcg_policy_enabled(q, pol))
1343                return 0;
1344
1345        if (queue_is_mq(q))
1346                blk_mq_freeze_queue(q);
1347pd_prealloc:
1348        if (!pd_prealloc) {
1349                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1350                if (!pd_prealloc) {
1351                        ret = -ENOMEM;
1352                        goto out_bypass_end;
1353                }
1354        }
1355
1356        spin_lock_irq(&q->queue_lock);
1357
 1358        /* blkg_list adds at the head; walk in reverse to init parents first */
1359        list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1360                struct blkg_policy_data *pd;
1361
1362                if (blkg->pd[pol->plid])
1363                        continue;
1364
1365                pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1366                if (!pd)
1367                        swap(pd, pd_prealloc);
1368                if (!pd) {
1369                        spin_unlock_irq(&q->queue_lock);
1370                        goto pd_prealloc;
1371                }
1372
1373                blkg->pd[pol->plid] = pd;
1374                pd->blkg = blkg;
1375                pd->plid = pol->plid;
1376                if (pol->pd_init_fn)
1377                        pol->pd_init_fn(pd);
1378        }
1379
1380        __set_bit(pol->plid, q->blkcg_pols);
1381        ret = 0;
1382
1383        spin_unlock_irq(&q->queue_lock);
1384out_bypass_end:
1385        if (queue_is_mq(q))
1386                blk_mq_unfreeze_queue(q);
1387        if (pd_prealloc)
1388                pol->pd_free_fn(pd_prealloc);
1389        return ret;
1390}
1391EXPORT_SYMBOL_GPL(blkcg_activate_policy);
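
/*
 * Illustrative sketch (not part of the original file): a policy usually
 * activates itself on a queue from its per-queue init path, e.g.
 * (assuming a hypothetical "my_policy"):
 *
 *	int my_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &my_policy);
 *	}
 *
 * and undoes it with blkcg_deactivate_policy() from the matching exit path.
 */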
1392
1393/**
1394 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1395 * @q: request_queue of interest
1396 * @pol: blkcg policy to deactivate
1397 *
1398 * Deactivate @pol on @q.  Follows the same synchronization rules as
1399 * blkcg_activate_policy().
1400 */
1401void blkcg_deactivate_policy(struct request_queue *q,
1402                             const struct blkcg_policy *pol)
1403{
1404        struct blkcg_gq *blkg;
1405
1406        if (!blkcg_policy_enabled(q, pol))
1407                return;
1408
1409        if (queue_is_mq(q))
1410                blk_mq_freeze_queue(q);
1411
1412        spin_lock_irq(&q->queue_lock);
1413
1414        __clear_bit(pol->plid, q->blkcg_pols);
1415
1416        list_for_each_entry(blkg, &q->blkg_list, q_node) {
1417                if (blkg->pd[pol->plid]) {
1418                        if (pol->pd_offline_fn)
1419                                pol->pd_offline_fn(blkg->pd[pol->plid]);
1420                        pol->pd_free_fn(blkg->pd[pol->plid]);
1421                        blkg->pd[pol->plid] = NULL;
1422                }
1423        }
1424
1425        spin_unlock_irq(&q->queue_lock);
1426
1427        if (queue_is_mq(q))
1428                blk_mq_unfreeze_queue(q);
1429}
1430EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1431
1432/**
1433 * blkcg_policy_register - register a blkcg policy
1434 * @pol: blkcg policy to register
1435 *
1436 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1437 * successful registration.  Returns 0 on success and -errno on failure.
1438 */
1439int blkcg_policy_register(struct blkcg_policy *pol)
1440{
1441        struct blkcg *blkcg;
1442        int i, ret;
1443
1444        mutex_lock(&blkcg_pol_register_mutex);
1445        mutex_lock(&blkcg_pol_mutex);
1446
1447        /* find an empty slot */
1448        ret = -ENOSPC;
1449        for (i = 0; i < BLKCG_MAX_POLS; i++)
1450                if (!blkcg_policy[i])
1451                        break;
1452        if (i >= BLKCG_MAX_POLS) {
1453                pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1454                goto err_unlock;
1455        }
1456
 1457        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1458        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1459                (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1460                goto err_unlock;
1461
1462        /* register @pol */
1463        pol->plid = i;
1464        blkcg_policy[pol->plid] = pol;
1465
1466        /* allocate and install cpd's */
1467        if (pol->cpd_alloc_fn) {
1468                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1469                        struct blkcg_policy_data *cpd;
1470
1471                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1472                        if (!cpd)
1473                                goto err_free_cpds;
1474
1475                        blkcg->cpd[pol->plid] = cpd;
1476                        cpd->blkcg = blkcg;
1477                        cpd->plid = pol->plid;
1478                        pol->cpd_init_fn(cpd);
1479                }
1480        }
1481
1482        mutex_unlock(&blkcg_pol_mutex);
1483
1484        /* everything is in place, add intf files for the new policy */
1485        if (pol->dfl_cftypes)
1486                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1487                                               pol->dfl_cftypes));
1488        if (pol->legacy_cftypes)
1489                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1490                                                  pol->legacy_cftypes));
1491        mutex_unlock(&blkcg_pol_register_mutex);
1492        return 0;
1493
1494err_free_cpds:
1495        if (pol->cpd_free_fn) {
1496                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1497                        if (blkcg->cpd[pol->plid]) {
1498                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1499                                blkcg->cpd[pol->plid] = NULL;
1500                        }
1501                }
1502        }
1503        blkcg_policy[pol->plid] = NULL;
1504err_unlock:
1505        mutex_unlock(&blkcg_pol_mutex);
1506        mutex_unlock(&blkcg_pol_register_mutex);
1507        return ret;
1508}
1509EXPORT_SYMBOL_GPL(blkcg_policy_register);
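
/*
 * Illustrative sketch (not part of the original file): a minimal policy
 * registration, typically done from init/module code.  The callbacks and
 * cftype arrays named here are hypothetical.
 *
 *	static struct blkcg_policy my_policy = {
 *		.dfl_cftypes	= my_dfl_files,
 *		.legacy_cftypes	= my_legacy_files,
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 * blkcg_policy_unregister(&my_policy) undoes the registration on exit.
 */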
1510
1511/**
1512 * blkcg_policy_unregister - unregister a blkcg policy
1513 * @pol: blkcg policy to unregister
1514 *
1515 * Undo blkcg_policy_register(@pol).  Might sleep.
1516 */
1517void blkcg_policy_unregister(struct blkcg_policy *pol)
1518{
1519        struct blkcg *blkcg;
1520
1521        mutex_lock(&blkcg_pol_register_mutex);
1522
1523        if (WARN_ON(blkcg_policy[pol->plid] != pol))
1524                goto out_unlock;
1525
1526        /* kill the intf files first */
1527        if (pol->dfl_cftypes)
1528                cgroup_rm_cftypes(pol->dfl_cftypes);
1529        if (pol->legacy_cftypes)
1530                cgroup_rm_cftypes(pol->legacy_cftypes);
1531
1532        /* remove cpds and unregister */
1533        mutex_lock(&blkcg_pol_mutex);
1534
1535        if (pol->cpd_free_fn) {
1536                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1537                        if (blkcg->cpd[pol->plid]) {
1538                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1539                                blkcg->cpd[pol->plid] = NULL;
1540                        }
1541                }
1542        }
1543        blkcg_policy[pol->plid] = NULL;
1544
1545        mutex_unlock(&blkcg_pol_mutex);
1546out_unlock:
1547        mutex_unlock(&blkcg_pol_register_mutex);
1548}
1549EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1550
1551bool __blkcg_punt_bio_submit(struct bio *bio)
1552{
1553        struct blkcg_gq *blkg = bio->bi_blkg;
1554
1555        /* consume the flag first */
1556        bio->bi_opf &= ~REQ_CGROUP_PUNT;
1557
1558        /* never bounce for the root cgroup */
1559        if (!blkg->parent)
1560                return false;
1561
1562        spin_lock_bh(&blkg->async_bio_lock);
1563        bio_list_add(&blkg->async_bios, bio);
1564        spin_unlock_bh(&blkg->async_bio_lock);
1565
1566        queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
1567        return true;
1568}
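
    /*
     * Illustrative sketch (assumption, not taken from this file): a submitter
     * that wants its bio issued from the per-blkcg punt workqueue instead of
     * the current context tags the bio before submission, e.g.:
     *
     *	bio->bi_opf |= REQ_CGROUP_PUNT;
     *	submit_bio(bio);
     *
     * __blkcg_punt_bio_submit() then strips the flag, queues the bio on
     * blkg->async_bios and lets the blkg's async_bio_work item resubmit it
     * from blkcg_punt_bio_wq.
     */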
1569
1570/*
1571 * Scale the accumulated delay based on how long it has been since we last
1572 * updated the delay.  We call this when adding delay, in case a while has
1573 * passed since delay was last added, and when checking whether a task needs
1574 * to be throttled, to account for any delay that has already elapsed.
1575 */
1576static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1577{
1578        u64 old = atomic64_read(&blkg->delay_start);
1579
1580        /*
1581         * We only want to scale down once per second.  The idea here is that we
1582         * want to delay tasks for min(delay_nsec, NSEC_PER_SEC) within a given
1583         * time window.  We only want to throttle for delay that has accrued
1584         * recently, in 1 second windows, since that is the maximum amount of
1585         * time a task can be throttled for.  We save the current delay window in
1586         * blkg->last_delay so we know what amount is still left to be charged
1587         * to the blkg from this point onward.  blkg->last_use keeps track of
1588         * the use_delay counter.  If we are unthrottling the blkg, we are ok
1589         * with whatever is happening now, and we can take away more of the
1590         * accumulated delay as we have already throttled enough that
1591         * everybody is happy with their IO latencies.
1592         */
1593        if (time_before64(old + NSEC_PER_SEC, now) &&
1594            atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1595                u64 cur = atomic64_read(&blkg->delay_nsec);
1596                u64 sub = min_t(u64, blkg->last_delay, now - old);
1597                int cur_use = atomic_read(&blkg->use_delay);
1598
1599                /*
1600                 * We've been unthrottled, subtract a larger chunk of our
1601                 * accumulated delay.
1602                 */
1603                if (cur_use < blkg->last_use)
1604                        sub = max_t(u64, sub, blkg->last_delay >> 1);
1605
1606                /*
1607                 * This shouldn't happen, but handle it anyway.  Our delay_nsec
1608                 * should only ever be growing except here where we subtract out
1609                 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1610                 * rather not end up with negative numbers.
1611                 */
1612                if (unlikely(cur < sub)) {
1613                        atomic64_set(&blkg->delay_nsec, 0);
1614                        blkg->last_delay = 0;
1615                } else {
1616                        atomic64_sub(sub, &blkg->delay_nsec);
1617                        blkg->last_delay = cur - sub;
1618                }
1619                blkg->last_use = cur_use;
1620        }
1621}
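
    /*
     * Worked example with illustrative numbers: suppose 1.5s have elapsed
     * since delay_start, delay_nsec is 900ms, last_delay was 600ms and
     * use_delay has dropped since last_use.  Then sub = min(600ms, 1.5s) =
     * 600ms, raised to max(600ms, 600ms >> 1) = 600ms, so delay_nsec becomes
     * 900ms - 600ms = 300ms and last_delay is set to 300ms for the next
     * window.
     */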
1622
1623/*
1624 * This is called when we want to walk up the hierarchy and check whether we
1625 * need to throttle, and then actually throttle if there is some
1626 * accumulated delay.  This should only be called upon return to user space so
1627 * we're not holding some lock that would induce a priority inversion.
1628 */
1629static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1630{
1631        unsigned long pflags;
1632        u64 now = ktime_to_ns(ktime_get());
1633        u64 exp;
1634        u64 delay_nsec = 0;
1635        int tok;
1636
1637        while (blkg->parent) {
1638                if (atomic_read(&blkg->use_delay)) {
1639                        blkcg_scale_delay(blkg, now);
1640                        delay_nsec = max_t(u64, delay_nsec,
1641                                           atomic64_read(&blkg->delay_nsec));
1642                }
1643                blkg = blkg->parent;
1644        }
1645
1646        if (!delay_nsec)
1647                return;
1648
1649        /*
1650         * Let's not sleep for all eternity if we've amassed a huge delay.
1651         * Swapping or metadata IO can accumulate 10's of seconds worth of
1652         * delay, and we want userspace to be able to do _something_ so cap the
1653         * delays at 0.25s.  If there's 10's of seconds worth of delay then
1654         * the tasks will be delayed for 0.25 seconds for every syscall.
1655         */
1656        delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1657
1658        if (use_memdelay)
1659                psi_memstall_enter(&pflags);
1660
1661        exp = ktime_add_ns(now, delay_nsec);
1662        tok = io_schedule_prepare();
1663        do {
1664                __set_current_state(TASK_KILLABLE);
1665                if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1666                        break;
1667        } while (!fatal_signal_pending(current));
1668        io_schedule_finish(tok);
1669
1670        if (use_memdelay)
1671                psi_memstall_leave(&pflags);
1672}
1673
1674/**
1675 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1676 *
1677 * This is only called if we've been marked with set_notify_resume().  Obviously
1678 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1679 * check to see if current->throttle_queue is set; if it is not, this doesn't do
1680 * anything.  This should only ever be called by the resume code; it's not meant
1681 * to be called by people willy-nilly, as it will actually do the work to
1682 * throttle the task if it is set up for throttling.
1683 */
1684void blkcg_maybe_throttle_current(void)
1685{
1686        struct request_queue *q = current->throttle_queue;
1687        struct cgroup_subsys_state *css;
1688        struct blkcg *blkcg;
1689        struct blkcg_gq *blkg;
1690        bool use_memdelay = current->use_memdelay;
1691
1692        if (!q)
1693                return;
1694
1695        current->throttle_queue = NULL;
1696        current->use_memdelay = false;
1697
1698        rcu_read_lock();
1699        css = kthread_blkcg();
1700        if (css)
1701                blkcg = css_to_blkcg(css);
1702        else
1703                blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1704
1705        if (!blkcg)
1706                goto out;
1707        blkg = blkg_lookup(blkcg, q);
1708        if (!blkg)
1709                goto out;
1710        if (!blkg_tryget(blkg))
1711                goto out;
1712        rcu_read_unlock();
1713
1714        blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1715        blkg_put(blkg);
1716        blk_put_queue(q);
1717        return;
1718out:
1719        rcu_read_unlock();
1720        blk_put_queue(q);
1721}
1722
1723/**
1724 * blkcg_schedule_throttle - this task needs to check for throttling
1725 * @q: the request queue IO was submitted on
1726 * @use_memdelay: whether to charge this delay to PSI memory stall accounting
1727 *
1728 * This is called by the IO controller when we know there's delay accumulated
1729 * for the blkg for this task.  We do not pass the blkg because there are places
1730 * we call this that may not have that information; the swapping code, for
1731 * instance, will only have a request_queue at that point.  This sets
1732 * notify_resume for the task to check whether it requires throttling before
1733 * returning to user space.
1734 *
1735 * We will only schedule once per syscall.  You can call this over and over
1736 * again and it will only do the check once upon return to user space, and only
1737 * throttle once.  If the task needs to be throttled again, it'll need to be
1738 * re-set the next time we see the task.
1739 */
1740void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1741{
1742        if (unlikely(current->flags & PF_KTHREAD))
1743                return;
1744
1745        if (!blk_get_queue(q))
1746                return;
1747
1748        if (current->throttle_queue)
1749                blk_put_queue(current->throttle_queue);
1750        current->throttle_queue = q;
1751        if (use_memdelay)
1752                current->use_memdelay = use_memdelay;
1753        set_notify_resume(current);
1754}
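
    /*
     * Illustrative sketch (assumption): a caller that only has a block
     * device, such as the swapping code mentioned above, can arm throttling
     * with nothing more than the device's request_queue:
     *
     *	blkcg_schedule_throttle(bdev_get_queue(bdev), true);
     *
     * The throttling itself then happens in blkcg_maybe_throttle_current()
     * on the way back to user space.
     */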
1755
1756/**
1757 * blkcg_add_delay - add delay to this blkg
1758 * @blkg: blkg of interest
1759 * @now: the current time in nanoseconds
1760 * @delta: how many nanoseconds of delay to add
1761 *
1762 * Charge @delta to the blkg's current delay accumulation.  This is used to
1763 * throttle tasks if an IO controller thinks we need more throttling.
1764 */
1765void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1766{
1767        blkcg_scale_delay(blkg, now);
1768        atomic64_add(delta, &blkg->delay_nsec);
1769}
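
    /*
     * Illustrative sketch (assumption): an IO controller that has decided a
     * group owes delta nanoseconds of extra delay would typically charge the
     * blkg and then ask for throttling on the next return to user space:
     *
     *	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), delta);
     *	blkcg_schedule_throttle(blkg->q, true);
     *
     * Whether the delay is also charged as a PSI memstall (the "true" above)
     * depends on the controller.
     */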
1770
1771static int __init blkcg_init(void)
1772{
1773        blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
1774                                            WQ_MEM_RECLAIM | WQ_FREEZABLE |
1775                                            WQ_UNBOUND | WQ_SYSFS, 0);
1776        if (!blkcg_punt_bio_wq)
1777                return -ENOMEM;
1778        return 0;
1779}
1780subsys_initcall(blkcg_init);
1781
1782module_param(blkcg_debug_stats, bool, 0644);
1783MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
1784