linux/block/blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

        if (blkg->blkcg != &blkcg_root)
                blk_exit_rl(blkg->q, &blkg->rl);

        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
            blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
                goto err_free;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        atomic_set(&blkg->refcnt, 1);

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = pol->pd_alloc_fn(gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint)
{
        struct blkcg_gq *blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        struct bdi_writeback_congested *wb_congested;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -ENODEV;
                goto err_free_blkg;
        }

        wb_congested = wb_congested_get_create(q->backing_dev_info,
                                               blkcg->css.id,
                                               GFP_NOWAIT | __GFP_NOWARN);
        if (!wb_congested) {
                ret = -ENOMEM;
                goto err_put_css;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_congested;
                }
        }
        blkg = new_blkg;
        blkg->wb_congested = wb_congested;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -ENODEV;
                        goto err_put_congested;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg->pd[i]);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg->pd[i]);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized; use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_congested:
        wb_congested_put(wb_congested);
err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
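
/*
 * Editor's note: a minimal sketch (compiled out) of the typical caller
 * pattern for blkg_lookup_create(), modeled on blkcg_bio_issue_check():
 * lookup is attempted locklessly first, and creation falls back to
 * @q->queue_lock.  example_account_bio() is a hypothetical name.
 */
#if 0
static void example_account_bio(struct bio *bio, struct request_queue *q)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* fast path: lockless lookup of an existing blkg */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                /* slow path: creation requires @q->queue_lock */
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;    /* fall back to the root blkg */
                spin_unlock_irq(q->queue_lock);
        }

        /* ... use @blkg, e.g. charge stats, while still under RCU ... */

        rcu_read_unlock();
}
#endif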

static void blkg_pd_offline(struct blkcg_gq *blkg)
{
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkg->blkcg->lock);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && !blkg->pd[i]->offline &&
                    pol->pd_offline_fn) {
                        pol->pd_offline_fn(blkg->pd[i]);
                        blkg->pd[i]->offline = true;
                }
        }
}

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something is wrong if we are trying to remove the same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
        }

        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but holding an RCU read lock does not mean
 * that one can access all the fields of a blkg and assume they are valid.
 * For example, don't try to follow throtl_data and request queue links.
 *
 * Holding a reference to a blkg under RCU only allows access to values
 * local to the group, such as group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        wb_congested_put(blkg->wb_congested);

        blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The function below is used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}

static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                blkg_rwstat_reset(&blkg->stat_bytes);
                blkg_rwstat_reset(&blkg->stat_ios);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg->pd[i]);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info->dev)
                return dev_name(blkg->q->backing_dev_info->dev);
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->seq_show method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
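
/*
 * Editor's note: a minimal sketch (compiled out) of how a policy builds a
 * seq_show callback by pairing blkcg_print_blkgs() with a prfill function,
 * following the pattern used by the throttling and CFQ policies.
 * blkcg_policy_example and example_pd_to_weight() are hypothetical names.
 */
#if 0
static u64 example_prfill_weight(struct seq_file *sf,
                                 struct blkg_policy_data *pd, int off)
{
        /* emits one "MAJ:MIN <value>" line for this blkg's device */
        return __blkg_prfill_u64(sf, pd, example_pd_to_weight(pd));
}

static int example_print_weight(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          example_prfill_weight, &blkcg_policy_example,
                          0, false);
        return 0;
}
#endif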

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

        v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
                                    struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
                                              struct blkg_policy_data *pd,
                                              int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
                                                              NULL, off);
        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_stat *stat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        stat = (void *)blkg + off;

                sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
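
/*
 * Editor's note: a minimal sketch (compiled out) of a recursive prfill
 * callback built on blkg_stat_recursive_sum(), following the pattern of
 * CFQ's recursive stat files.  blkcg_policy_example is hypothetical; @off
 * would be the offset of a blkg_stat inside the policy's pd.
 */
#if 0
static u64 example_prfill_stat_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
                                          &blkcg_policy_example, off);

        return __blkg_prfill_u64(sf, pd, sum);
}
#endif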

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum = { };
        int i;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_rwstat *rwstat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        rwstat = (void *)pos_blkg + off;

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
                                percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
                                &sum.aux_cnt[i]);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
                                          const struct blkcg_policy *pol,
                                          struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        if (!blkcg_policy_enabled(q, pol))
                return ERR_PTR(-EOPNOTSUPP);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

        return __blkg_lookup(blkcg, q, true /* update_hint */);
}

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct request_queue *q;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        int key_len, part, ret;
        char *body;

        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
                return -EINVAL;

        body = input + key_len;
        if (!isspace(*body))
                return -EINVAL;
        body = skip_spaces(body);

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk)
                return -ENODEV;
        if (part) {
                ret = -ENODEV;
                goto fail;
        }

        q = disk->queue;

        rcu_read_lock();
        spin_lock_irq(q->queue_lock);

        blkg = blkg_lookup_check(blkcg, pol, q);
        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto fail_unlock;
        }

        if (blkg)
                goto success;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent;
                struct blkcg_gq *new_blkg;

                parent = blkcg_parent(blkcg);
                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
                spin_unlock_irq(q->queue_lock);
                rcu_read_unlock();

                new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto fail;
                }

                rcu_read_lock();
                spin_lock_irq(q->queue_lock);

                blkg = blkg_lookup_check(pos, pol, q);
                if (IS_ERR(blkg)) {
                        ret = PTR_ERR(blkg);
                        goto fail_unlock;
                }

                if (blkg) {
                        blkg_free(new_blkg);
                } else {
                        blkg = blkg_create(pos, q, new_blkg);
                        if (unlikely(IS_ERR(blkg))) {
                                ret = PTR_ERR(blkg);
                                goto fail_unlock;
                        }
                }

                if (pos == blkcg)
                        goto success;
        }
success:
        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->body = body;
        return 0;

fail_unlock:
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
fail:
        put_disk_and_module(disk);
        /*
         * If queue was bypassing, we should retry.  Do so after a
         * short msleep().  It isn't strictly necessary but queue
         * can be bypassing for some time and it's always nice to
         * avoid busy looping.
         */
        if (ret == -EBUSY) {
                msleep(10);
                ret = restart_syscall();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk_and_module(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
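
/*
 * Editor's note: a minimal sketch (compiled out) of a policy's cftype
 * write handler pairing blkg_conf_prep() with blkg_conf_finish(), after
 * the pattern of blk-throttle's tg_set_conf().  blkcg_policy_example and
 * example_apply_limit() are hypothetical names.
 */
#if 0
static ssize_t example_set_limit(struct kernfs_open_file *of, char *buf,
                                 size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkg_conf_ctx ctx;
        u64 v;
        int ret;

        /* parses "MAJ:MIN ..." and returns with RCU and queue_lock held */
        ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
        if (ret)
                return ret;

        ret = -EINVAL;
        if (sscanf(ctx.body, "%llu", &v) == 1) {
                example_apply_limit(ctx.blkg, v);
                ret = 0;
        }

        blkg_conf_finish(&ctx);         /* drops queue_lock, RCU, disk ref */
        return ret ?: nbytes;
}
#endif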

static int blkcg_print_stat(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;

        rcu_read_lock();

        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                const char *dname;
                struct blkg_rwstat rwstat;
                u64 rbytes, wbytes, rios, wios;

                dname = blkg_dev_name(blkg);
                if (!dname)
                        continue;

                spin_lock_irq(blkg->q->queue_lock);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
                rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_ios));
                rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                spin_unlock_irq(blkg->q->queue_lock);

                if (rbytes || wbytes || rios || wios)
                        seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
                                   dname, rbytes, wbytes, rios, wios);
        }

        rcu_read_unlock();
        return 0;
}

static struct cftype blkcg_files[] = {
        {
                .name = "stat",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
};

static struct cftype blkcg_legacy_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is
 * responsible for offlining the pd of every blkg and killing all wbs
 * associated with @css.  A blkg's pd should be offlined while holding
 * both the q and blkcg locks.  As the blkcg lock is nested inside the q
 * lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;

        spin_lock_irq(&blkcg->lock);

        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_pd_offline(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);

        wb_blkcg_offline(blkcg);
}

/**
 * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
 * @blkcg: blkcg of interest
 *
 * This function is called when the blkcg css is about to be freed and is
 * responsible for destroying all blkgs associated with @blkcg.  blkgs
 * should be removed while holding both the q and blkcg locks.  As the
 * blkcg lock is nested inside the q lock, this function performs reverse
 * double lock dancing.
 */
static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
{
        spin_lock_irq(&blkcg->lock);
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq,
                                                    blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }
        spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;

        blkcg_destroy_all_blkgs(blkcg);

        mutex_lock(&blkcg_pol_mutex);

        list_del(&blkcg->all_blkcgs_node);

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        mutex_unlock(&blkcg_pol_mutex);

        kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;
        struct cgroup_subsys_state *ret;
        int i;

        mutex_lock(&blkcg_pol_mutex);

        if (!parent_css) {
                blkcg = &blkcg_root;
        } else {
                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
                if (!blkcg) {
                        ret = ERR_PTR(-ENOMEM);
                        goto unlock;
                }
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg_policy_data *cpd;

                /*
                 * If the policy hasn't been attached yet, wait for it
                 * to be attached before doing anything else. Otherwise,
                 * check if the policy requires any specific per-cgroup
                 * data: if it does, allocate and initialize it.
                 */
                if (!pol || !pol->cpd_alloc_fn)
                        continue;

                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                if (!cpd) {
                        ret = ERR_PTR(-ENOMEM);
                        goto free_pd_blkcg;
                }
                blkcg->cpd[i] = cpd;
                cpd->blkcg = blkcg;
                cpd->plid = i;
                if (pol->cpd_init_fn)
                        pol->cpd_init_fn(cpd);
        }

        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

        mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;

free_pd_blkcg:
        for (i--; i >= 0; i--)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        struct blkcg_gq *new_blkg, *blkg;
        bool preloaded;
        int ret;

        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /* Make sure the root blkg exists. */
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
        if (IS_ERR(blkg))
                goto err_unlock;
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        ret = blk_throtl_init(q);
        if (ret) {
                spin_lock_irq(q->queue_lock);
                blkg_destroy_all(q);
                spin_unlock_irq(q->queue_lock);
        }
        return ret;

err_unlock:
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
        if (preloaded)
                radix_tree_preload_end();
        return PTR_ERR(blkg);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        /*
         * @q could be exiting and already have destroyed all blkgs as
         * indicated by NULL root_blkg.  If so, don't confuse policies.
         */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, dst_css, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
        int i;

        mutex_lock(&blkcg_pol_mutex);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg *blkcg;

                if (!pol || !pol->cpd_bind_fn)
                        continue;

                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
                        if (blkcg->cpd[pol->plid])
                                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
        }
        mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .bind = blkcg_bind,
        .dfl_cftypes = blkcg_files,
        .legacy_cftypes = blkcg_legacy_files,
        .legacy_name = "blkio",
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        struct blkg_policy_data *pd_prealloc = NULL;
        struct blkcg_gq *blkg;
        int ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        if (q->mq_ops)
                blk_mq_freeze_queue(q);
        else
                blk_queue_bypass_start(q);
pd_prealloc:
        if (!pd_prealloc) {
                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
                if (!pd_prealloc) {
                        ret = -ENOMEM;
                        goto out_bypass_end;
                }
        }

        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;

                if (blkg->pd[pol->plid])
                        continue;

                pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
                if (!pd)
                        swap(pd, pd_prealloc);
                if (!pd) {
                        spin_unlock_irq(q->queue_lock);
                        goto pd_prealloc;
                }

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                if (pol->pd_init_fn)
                        pol->pd_init_fn(pd);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;

        spin_unlock_irq(q->queue_lock);
out_bypass_end:
        if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
        else
                blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        if (q->mq_ops)
                blk_mq_freeze_queue(q);
        else
                blk_queue_bypass_start(q);

        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (blkg->pd[pol->plid]) {
                        if (!blkg->pd[pol->plid]->offline &&
                            pol->pd_offline_fn) {
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                                blkg->pd[pol->plid]->offline = true;
                        }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
        }

        spin_unlock_irq(q->queue_lock);

        if (q->mq_ops)
                blk_mq_unfreeze_queue(q);
        else
                blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
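
/*
 * Editor's note: a minimal sketch (compiled out) of how a policy's queue
 * setup and teardown paths call the two functions above, loosely modeled
 * on CFQ activating blkcg_policy_cfq while initializing a queue.  All
 * example_* names and the hook signatures are hypothetical.
 */
#if 0
static int example_init_queue(struct request_queue *q)
{
        int ret;

        /* every existing and future blkg of @q gets our pd on success */
        ret = blkcg_activate_policy(q, &blkcg_policy_example);
        if (ret)
                return ret;

        return 0;
}

static void example_exit_queue(struct request_queue *q)
{
        /* offlines and frees our pd on every blkg of @q */
        blkcg_deactivate_policy(q, &blkcg_policy_example);
}
#endif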

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;
        int i, ret;

        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto err_unlock;

        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
                (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
                goto err_unlock;

        /* register @pol */
        pol->plid = i;
        blkcg_policy[pol->plid] = pol;

        /* allocate and install cpd's */
        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        struct blkcg_policy_data *cpd;

                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                        if (!cpd)
                                goto err_free_cpds;

                        blkcg->cpd[pol->plid] = cpd;
                        cpd->blkcg = blkcg;
                        cpd->plid = pol->plid;
                        pol->cpd_init_fn(cpd);
                }
        }

        mutex_unlock(&blkcg_pol_mutex);

        /* everything is in place, add intf files for the new policy */
        if (pol->dfl_cftypes)
                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
                                               pol->dfl_cftypes));
        if (pol->legacy_cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
                                                  pol->legacy_cftypes));
        mutex_unlock(&blkcg_pol_register_mutex);
        return 0;

err_free_cpds:
        if (pol->cpd_free_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;
err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;

        mutex_lock(&blkcg_pol_register_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->dfl_cftypes)
                cgroup_rm_cftypes(pol->dfl_cftypes);
        if (pol->legacy_cftypes)
                cgroup_rm_cftypes(pol->legacy_cftypes);

        /* remove cpds and unregister */
        mutex_lock(&blkcg_pol_mutex);

        if (pol->cpd_free_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;

        mutex_unlock(&blkcg_pol_mutex);
out_unlock:
        mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
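
/*
 * Editor's note: a minimal sketch (compiled out) of a policy module
 * [un]registering itself.  Per the pairing check in
 * blkcg_policy_register(), pd_alloc_fn and pd_free_fn must be provided
 * together.  All example_* names are hypothetical; a real policy would
 * embed blkg_policy_data in its own per-blkg struct and usually also set
 * cftypes and the pd init/offline callbacks.
 */
#if 0
static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
{
        return kzalloc_node(sizeof(struct blkg_policy_data), gfp, node);
}

static void example_pd_free(struct blkg_policy_data *pd)
{
        kfree(pd);
}

static struct blkcg_policy blkcg_policy_example = {
        .pd_alloc_fn    = example_pd_alloc,
        .pd_free_fn     = example_pd_free,
};

static int __init example_init(void)
{
        /* assigns pol->plid and installs per-blkcg data if requested */
        return blkcg_policy_register(&blkcg_policy_example);
}

static void __exit example_exit(void)
{
        blkcg_policy_unregister(&blkcg_policy_example);
}
#endif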
1550