linux/block/blk-cgroup.c
   1/*
   2 * Common Block IO controller cgroup interface
   3 *
   4 * Based on ideas and code from CFQ, CFS and BFQ:
   5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   6 *
   7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   8 *                    Paolo Valente <paolo.valente@unimore.it>
   9 *
  10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11 *                    Nauman Rafique <nauman@google.com>
  12 *
  13 * For policy-specific per-blkcg data:
  14 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  15 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
  16 */
  17#include <linux/ioprio.h>
  18#include <linux/kdev_t.h>
  19#include <linux/module.h>
  20#include <linux/err.h>
  21#include <linux/blkdev.h>
  22#include <linux/backing-dev.h>
  23#include <linux/slab.h>
  24#include <linux/genhd.h>
  25#include <linux/delay.h>
  26#include <linux/atomic.h>
  27#include <linux/ctype.h>
  28#include <linux/blk-cgroup.h>
  29#include "blk.h"
  30
  31#define MAX_KEY_LEN 100
  32
  33/*
  34 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
  35 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
  36 * policy [un]register operations including cgroup file additions /
  37 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
  38 * allows grabbing it from cgroup callbacks.
  39 */
  40static DEFINE_MUTEX(blkcg_pol_register_mutex);
  41static DEFINE_MUTEX(blkcg_pol_mutex);
  42
  43struct blkcg blkcg_root;
  44EXPORT_SYMBOL_GPL(blkcg_root);
  45
  46struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
  47
  48static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  49
  50static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
  51
  52static bool blkcg_policy_enabled(struct request_queue *q,
  53                                 const struct blkcg_policy *pol)
  54{
  55        return pol && test_bit(pol->plid, q->blkcg_pols);
  56}
  57
  58/**
  59 * blkg_free - free a blkg
  60 * @blkg: blkg to free
  61 *
  62 * Free @blkg which may be partially allocated.
  63 */
  64static void blkg_free(struct blkcg_gq *blkg)
  65{
  66        int i;
  67
  68        if (!blkg)
  69                return;
  70
  71        for (i = 0; i < BLKCG_MAX_POLS; i++)
  72                if (blkg->pd[i])
  73                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
  74
  75        if (blkg->blkcg != &blkcg_root)
  76                blk_exit_rl(&blkg->rl);
  77
  78        blkg_rwstat_exit(&blkg->stat_ios);
  79        blkg_rwstat_exit(&blkg->stat_bytes);
  80        kfree(blkg);
  81}
  82
  83/**
  84 * blkg_alloc - allocate a blkg
  85 * @blkcg: block cgroup the new blkg is associated with
  86 * @q: request_queue the new blkg is associated with
  87 * @gfp_mask: allocation mask to use
  88 *
  89 * Allocate a new blkg associating @blkcg and @q.
  90 */
  91static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  92                                   gfp_t gfp_mask)
  93{
  94        struct blkcg_gq *blkg;
  95        int i;
  96
  97        /* alloc and init base part */
  98        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  99        if (!blkg)
 100                return NULL;
 101
 102        if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
 103            blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
 104                goto err_free;
 105
 106        blkg->q = q;
 107        INIT_LIST_HEAD(&blkg->q_node);
 108        blkg->blkcg = blkcg;
 109        atomic_set(&blkg->refcnt, 1);
 110
 111        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
 112        if (blkcg != &blkcg_root) {
 113                if (blk_init_rl(&blkg->rl, q, gfp_mask))
 114                        goto err_free;
 115                blkg->rl.blkg = blkg;
 116        }
 117
 118        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 119                struct blkcg_policy *pol = blkcg_policy[i];
 120                struct blkg_policy_data *pd;
 121
 122                if (!blkcg_policy_enabled(q, pol))
 123                        continue;
 124
 125                /* alloc per-policy data and attach it to blkg */
 126                pd = pol->pd_alloc_fn(gfp_mask, q->node);
 127                if (!pd)
 128                        goto err_free;
 129
 130                blkg->pd[i] = pd;
 131                pd->blkg = blkg;
 132                pd->plid = i;
 133        }
 134
 135        return blkg;
 136
 137err_free:
 138        blkg_free(blkg);
 139        return NULL;
 140}
 141
 142struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 143                                      struct request_queue *q, bool update_hint)
 144{
 145        struct blkcg_gq *blkg;
 146
 147        /*
 148         * Hint didn't match.  Look up from the radix tree.  Note that the
 149         * hint can only be updated under queue_lock as otherwise @blkg
 150         * could have already been removed from blkg_tree.  The caller is
 151         * responsible for grabbing queue_lock if @update_hint.
 152         */
 153        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 154        if (blkg && blkg->q == q) {
 155                if (update_hint) {
 156                        lockdep_assert_held(q->queue_lock);
 157                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
 158                }
 159                return blkg;
 160        }
 161
 162        return NULL;
 163}
 164EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
 165
 166/*
 167 * If @new_blkg is %NULL, this function tries to allocate a new one as
 168 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 169 */
 170static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 171                                    struct request_queue *q,
 172                                    struct blkcg_gq *new_blkg)
 173{
 174        struct blkcg_gq *blkg;
 175        struct bdi_writeback_congested *wb_congested;
 176        int i, ret;
 177
 178        WARN_ON_ONCE(!rcu_read_lock_held());
 179        lockdep_assert_held(q->queue_lock);
 180
 181        /* blkg holds a reference to blkcg */
 182        if (!css_tryget_online(&blkcg->css)) {
 183                ret = -ENODEV;
 184                goto err_free_blkg;
 185        }
 186
 187        wb_congested = wb_congested_get_create(&q->backing_dev_info,
 188                                               blkcg->css.id, GFP_NOWAIT);
 189        if (!wb_congested) {
 190                ret = -ENOMEM;
 191                goto err_put_css;
 192        }
 193
 194        /* allocate */
 195        if (!new_blkg) {
 196                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
 197                if (unlikely(!new_blkg)) {
 198                        ret = -ENOMEM;
 199                        goto err_put_congested;
 200                }
 201        }
 202        blkg = new_blkg;
 203        blkg->wb_congested = wb_congested;
 204
 205        /* link parent */
 206        if (blkcg_parent(blkcg)) {
 207                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 208                if (WARN_ON_ONCE(!blkg->parent)) {
 209                        ret = -ENODEV;
 210                        goto err_put_congested;
 211                }
 212                blkg_get(blkg->parent);
 213        }
 214
 215        /* invoke per-policy init */
 216        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 217                struct blkcg_policy *pol = blkcg_policy[i];
 218
 219                if (blkg->pd[i] && pol->pd_init_fn)
 220                        pol->pd_init_fn(blkg->pd[i]);
 221        }
 222
 223        /* insert */
 224        spin_lock(&blkcg->lock);
 225        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 226        if (likely(!ret)) {
 227                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 228                list_add(&blkg->q_node, &q->blkg_list);
 229
 230                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 231                        struct blkcg_policy *pol = blkcg_policy[i];
 232
 233                        if (blkg->pd[i] && pol->pd_online_fn)
 234                                pol->pd_online_fn(blkg->pd[i]);
 235                }
 236        }
 237        blkg->online = true;
 238        spin_unlock(&blkcg->lock);
 239
 240        if (!ret)
 241                return blkg;
 242
 243        /* @blkg failed to be fully initialized, use the usual release path */
 244        blkg_put(blkg);
 245        return ERR_PTR(ret);
 246
 247err_put_congested:
 248        wb_congested_put(wb_congested);
 249err_put_css:
 250        css_put(&blkcg->css);
 251err_free_blkg:
 252        blkg_free(new_blkg);
 253        return ERR_PTR(ret);
 254}
 255
 256/**
 257 * blkg_lookup_create - lookup blkg, try to create one if not there
 258 * @blkcg: blkcg of interest
 259 * @q: request_queue of interest
 260 *
 261 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 262 * create one.  blkg creation is performed recursively from blkcg_root such
 263 * that all non-root blkg's have access to the parent blkg.  This function
 264 * should be called under RCU read lock and @q->queue_lock.
 265 *
 266 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 267 * value on error.  If @q is bypassing and dying, returns ERR_PTR(-ENODEV).
 268 * If @q is bypassing but not dying, returns ERR_PTR(-EBUSY).
 269 */
 270struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 271                                    struct request_queue *q)
 272{
 273        struct blkcg_gq *blkg;
 274
 275        WARN_ON_ONCE(!rcu_read_lock_held());
 276        lockdep_assert_held(q->queue_lock);
 277
 278        /*
 279         * This could be the first entry point of blkcg implementation and
 280         * we shouldn't allow anything to go through for a bypassing queue.
 281         */
 282        if (unlikely(blk_queue_bypass(q)))
 283                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
 284
 285        blkg = __blkg_lookup(blkcg, q, true);
 286        if (blkg)
 287                return blkg;
 288
 289        /*
 290         * Create blkgs walking down from blkcg_root to @blkcg, so that all
 291         * non-root blkgs have access to their parents.
 292         */
 293        while (true) {
 294                struct blkcg *pos = blkcg;
 295                struct blkcg *parent = blkcg_parent(blkcg);
 296
 297                while (parent && !__blkg_lookup(parent, q, false)) {
 298                        pos = parent;
 299                        parent = blkcg_parent(parent);
 300                }
 301
 302                blkg = blkg_create(pos, q, NULL);
 303                if (pos == blkcg || IS_ERR(blkg))
 304                        return blkg;
 305        }
 306}
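
/*
 * Worked example (a hypothetical hierarchy, nothing defined here): with a
 * cgroup chain root -> A -> B and no blkgs on @q yet, blkg_lookup_create(B, q)
 * takes three passes through the loop above:
 *
 *	1. the parent walk stops at root:  blkg_create(root, q, NULL)
 *	2. the parent walk stops at A:     blkg_create(A, q, NULL)
 *	3. pos == B:                       blkg_create(B, q, NULL), return
 *
 * so every non-root blkg can take a reference on its parent at creation.
 */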
 307
 308static void blkg_destroy(struct blkcg_gq *blkg)
 309{
 310        struct blkcg *blkcg = blkg->blkcg;
 311        struct blkcg_gq *parent = blkg->parent;
 312        int i;
 313
 314        lockdep_assert_held(blkg->q->queue_lock);
 315        lockdep_assert_held(&blkcg->lock);
 316
 317        /* Something is wrong if we are trying to remove the same group twice */
 318        WARN_ON_ONCE(list_empty(&blkg->q_node));
 319        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 320
 321        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 322                struct blkcg_policy *pol = blkcg_policy[i];
 323
 324                if (blkg->pd[i] && pol->pd_offline_fn)
 325                        pol->pd_offline_fn(blkg->pd[i]);
 326        }
 327
 328        if (parent) {
 329                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
 330                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
 331        }
 332
 333        blkg->online = false;
 334
 335        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 336        list_del_init(&blkg->q_node);
 337        hlist_del_init_rcu(&blkg->blkcg_node);
 338
 339        /*
 340         * Both setting lookup hint to and clearing it from @blkg are done
 341         * under queue_lock.  If it's not pointing to @blkg now, it never
 342         * will.  Hint assignment itself can race safely.
 343         */
 344        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 345                rcu_assign_pointer(blkcg->blkg_hint, NULL);
 346
 347        /*
 348         * Put the reference taken at the time of creation so that when all
 349         * queues are gone, group can be destroyed.
 350         */
 351        blkg_put(blkg);
 352}
 353
 354/**
 355 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 356 * @q: request_queue of interest
 357 *
 358 * Destroy all blkgs associated with @q.
 359 */
 360static void blkg_destroy_all(struct request_queue *q)
 361{
 362        struct blkcg_gq *blkg, *n;
 363
 364        lockdep_assert_held(q->queue_lock);
 365
 366        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 367                struct blkcg *blkcg = blkg->blkcg;
 368
 369                spin_lock(&blkcg->lock);
 370                blkg_destroy(blkg);
 371                spin_unlock(&blkcg->lock);
 372        }
 373
 374        q->root_blkg = NULL;
 375        q->root_rl.blkg = NULL;
 376}
 377
 378/*
 379 * A group is RCU protected, but having an RCU read lock does not mean that
 380 * one can access all the fields of a blkg and assume they are valid.  For
 381 * example, don't try to follow throtl_data and request queue links.
 382 *
 383 * Having a reference to a blkg under an RCU read lock only allows access to
 384 * values local to the group, like group stats and group rate limits.
 385 */
 386void __blkg_release_rcu(struct rcu_head *rcu_head)
 387{
 388        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
 389
 390        /* release the blkcg and parent blkg refs this blkg has been holding */
 391        css_put(&blkg->blkcg->css);
 392        if (blkg->parent)
 393                blkg_put(blkg->parent);
 394
 395        wb_congested_put(blkg->wb_congested);
 396
 397        blkg_free(blkg);
 398}
 399EXPORT_SYMBOL_GPL(__blkg_release_rcu);
 400
 401/*
 402 * The iteration 'next' function used by blk_queue_for_each_rl().  It's a bit
 403 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 404 */
 405struct request_list *__blk_queue_next_rl(struct request_list *rl,
 406                                         struct request_queue *q)
 407{
 408        struct list_head *ent;
 409        struct blkcg_gq *blkg;
 410
 411        /*
 412         * Determine the current blkg list_head.  The first entry is
 413         * root_rl which is off @q->blkg_list and mapped to the head.
 414         */
 415        if (rl == &q->root_rl) {
 416                ent = &q->blkg_list;
 417                /* There are no more block groups, hence no request lists */
 418                if (list_empty(ent))
 419                        return NULL;
 420        } else {
 421                blkg = container_of(rl, struct blkcg_gq, rl);
 422                ent = &blkg->q_node;
 423        }
 424
 425        /* walk to the next list_head, skip root blkcg */
 426        ent = ent->next;
 427        if (ent == &q->root_blkg->q_node)
 428                ent = ent->next;
 429        if (ent == &q->blkg_list)
 430                return NULL;
 431
 432        blkg = container_of(ent, struct blkcg_gq, q_node);
 433        return &blkg->rl;
 434}
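
/*
 * A minimal usage sketch of the iterator this helper backs; the
 * blk_queue_for_each_rl() macro itself lives in blk.h and callers must hold
 * @q->queue_lock.  This is roughly what queue draining does:
 *
 *	struct request_list *rl;
 *	int i;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 *			wake_up_all(&rl->wait[i]);
 */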
 435
 436static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 437                             struct cftype *cftype, u64 val)
 438{
 439        struct blkcg *blkcg = css_to_blkcg(css);
 440        struct blkcg_gq *blkg;
 441        int i;
 442
 443        mutex_lock(&blkcg_pol_mutex);
 444        spin_lock_irq(&blkcg->lock);
 445
 446        /*
 447         * Note that stat reset is racy - it doesn't synchronize against
 448         * stat updates.  This is a debug feature which shouldn't exist
 449         * anyway.  If you get hit by a race, retry.
 450         */
 451        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 452                blkg_rwstat_reset(&blkg->stat_bytes);
 453                blkg_rwstat_reset(&blkg->stat_ios);
 454
 455                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 456                        struct blkcg_policy *pol = blkcg_policy[i];
 457
 458                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
 459                                pol->pd_reset_stats_fn(blkg->pd[i]);
 460                }
 461        }
 462
 463        spin_unlock_irq(&blkcg->lock);
 464        mutex_unlock(&blkcg_pol_mutex);
 465        return 0;
 466}
 467
 468const char *blkg_dev_name(struct blkcg_gq *blkg)
 469{
 470        /* some drivers (floppy) instantiate a queue w/o disk registered */
 471        if (blkg->q->backing_dev_info.dev)
 472                return dev_name(blkg->q->backing_dev_info.dev);
 473        return NULL;
 474}
 475EXPORT_SYMBOL_GPL(blkg_dev_name);
 476
 477/**
 478 * blkcg_print_blkgs - helper for printing per-blkg data
 479 * @sf: seq_file to print to
 480 * @blkcg: blkcg of interest
 481 * @prfill: fill function to print out a blkg
 482 * @pol: policy in question
 483 * @data: data to be passed to @prfill
 484 * @show_total: to print out sum of prfill return values or not
 485 *
 486 * This function invokes @prfill on each blkg of @blkcg if pd for the
 487 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 488 * policy data and @data and the matching queue lock held.  If @show_total
 489 * is %true, the sum of the return values from @prfill is printed with
 490 * "Total" label at the end.
 491 *
 492 * This is to be used to construct print functions for
 493 * cftype->read_seq_string method.
 494 */
 495void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 496                       u64 (*prfill)(struct seq_file *,
 497                                     struct blkg_policy_data *, int),
 498                       const struct blkcg_policy *pol, int data,
 499                       bool show_total)
 500{
 501        struct blkcg_gq *blkg;
 502        u64 total = 0;
 503
 504        rcu_read_lock();
 505        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 506                spin_lock_irq(blkg->q->queue_lock);
 507                if (blkcg_policy_enabled(blkg->q, pol))
 508                        total += prfill(sf, blkg->pd[pol->plid], data);
 509                spin_unlock_irq(blkg->q->queue_lock);
 510        }
 511        rcu_read_unlock();
 512
 513        if (show_total)
 514                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 515}
 516EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
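
/*
 * Illustrative sketch of how a policy usually builds on this helper (the
 * foo_* names and blkcg_policy_foo are hypothetical; nothing in this file
 * or elsewhere defines them):
 *
 *	static u64 foo_prfill_weight(struct seq_file *sf,
 *				     struct blkg_policy_data *pd, int off)
 *	{
 *		struct foo_group *fg = container_of(pd, struct foo_group, pd);
 *
 *		return __blkg_prfill_u64(sf, pd, fg->weight);
 *	}
 *
 *	static int foo_print_weight(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  foo_prfill_weight, &blkcg_policy_foo, 0, false);
 *		return 0;
 *	}
 *
 * foo_print_weight() then goes into a cftype's .seq_show.
 */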
 517
 518/**
 519 * __blkg_prfill_u64 - prfill helper for a single u64 value
 520 * @sf: seq_file to print to
 521 * @pd: policy private data of interest
 522 * @v: value to print
 523 *
 524 * Print @v to @sf for the device associated with @pd.
 525 */
 526u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 527{
 528        const char *dname = blkg_dev_name(pd->blkg);
 529
 530        if (!dname)
 531                return 0;
 532
 533        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 534        return v;
 535}
 536EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 537
 538/**
 539 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 540 * @sf: seq_file to print to
 541 * @pd: policy private data of interest
 542 * @rwstat: rwstat to print
 543 *
 544 * Print @rwstat to @sf for the device associated with @pd.
 545 */
 546u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 547                         const struct blkg_rwstat *rwstat)
 548{
 549        static const char *rwstr[] = {
 550                [BLKG_RWSTAT_READ]      = "Read",
 551                [BLKG_RWSTAT_WRITE]     = "Write",
 552                [BLKG_RWSTAT_SYNC]      = "Sync",
 553                [BLKG_RWSTAT_ASYNC]     = "Async",
 554        };
 555        const char *dname = blkg_dev_name(pd->blkg);
 556        u64 v;
 557        int i;
 558
 559        if (!dname)
 560                return 0;
 561
 562        for (i = 0; i < BLKG_RWSTAT_NR; i++)
 563                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 564                           (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));
 565
 566        v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
 567                atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
 568        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
 569        return v;
 570}
 571EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 572
 573/**
 574 * blkg_prfill_stat - prfill callback for blkg_stat
 575 * @sf: seq_file to print to
 576 * @pd: policy private data of interest
 577 * @off: offset to the blkg_stat in @pd
 578 *
 579 * prfill callback for printing a blkg_stat.
 580 */
 581u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 582{
 583        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 584}
 585EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 586
 587/**
 588 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 589 * @sf: seq_file to print to
 590 * @pd: policy private data of interest
 591 * @off: offset to the blkg_rwstat in @pd
 592 *
 593 * prfill callback for printing a blkg_rwstat.
 594 */
 595u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 596                       int off)
 597{
 598        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 599
 600        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 601}
 602EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
 603
 604static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
 605                                    struct blkg_policy_data *pd, int off)
 606{
 607        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);
 608
 609        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 610}
 611
 612/**
 613 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 614 * @sf: seq_file to print to
 615 * @v: unused
 616 *
 617 * To be used as cftype->seq_show to print blkg->stat_bytes.
 618 * cftype->private must be set to the blkcg_policy.
 619 */
 620int blkg_print_stat_bytes(struct seq_file *sf, void *v)
 621{
 622        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 623                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 624                          offsetof(struct blkcg_gq, stat_bytes), true);
 625        return 0;
 626}
 627EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
 628
 629/**
 630 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 631 * @sf: seq_file to print to
 632 * @v: unused
 633 *
 634 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 635 * must be set to the blkcg_policy.
 636 */
 637int blkg_print_stat_ios(struct seq_file *sf, void *v)
 638{
 639        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 640                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
 641                          offsetof(struct blkcg_gq, stat_ios), true);
 642        return 0;
 643}
 644EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
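
/*
 * A hedged example of the cftype wiring these two helpers expect (the "foo"
 * policy below is hypothetical); .private must carry the blkcg_policy so
 * the prfill callback can pick the right policy data:
 *
 *	static struct cftype foo_legacy_files[] = {
 *		{
 *			.name = "foo.io_service_bytes",
 *			.private = (unsigned long)&blkcg_policy_foo,
 *			.seq_show = blkg_print_stat_bytes,
 *		},
 *		{
 *			.name = "foo.io_serviced",
 *			.private = (unsigned long)&blkcg_policy_foo,
 *			.seq_show = blkg_print_stat_ios,
 *		},
 *		{ }
 *	};
 *
 * The empty entry terminates the array, as in blkcg_legacy_files below.
 */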
 645
 646static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
 647                                              struct blkg_policy_data *pd,
 648                                              int off)
 649{
 650        struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
 651                                                              NULL, off);
 652        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 653}
 654
 655/**
 656 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 657 * @sf: seq_file to print to
 658 * @v: unused
 659 */
 660int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
 661{
 662        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 663                          blkg_prfill_rwstat_field_recursive,
 664                          (void *)seq_cft(sf)->private,
 665                          offsetof(struct blkcg_gq, stat_bytes), true);
 666        return 0;
 667}
 668EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
 669
 670/**
 671 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 672 * @sf: seq_file to print to
 673 * @v: unused
 674 */
 675int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
 676{
 677        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 678                          blkg_prfill_rwstat_field_recursive,
 679                          (void *)seq_cft(sf)->private,
 680                          offsetof(struct blkcg_gq, stat_ios), true);
 681        return 0;
 682}
 683EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
 684
 685/**
 686 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 687 * @blkg: blkg of interest
 688 * @pol: blkcg_policy which contains the blkg_stat
 689 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 690 *
 691 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 692 * online descendants and their aux counts.  The caller must be holding the
 693 * queue lock for online tests.
 694 *
 695 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 696 * at @off bytes into @blkg's blkg_policy_data of the policy.
 697 */
 698u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 699                            struct blkcg_policy *pol, int off)
 700{
 701        struct blkcg_gq *pos_blkg;
 702        struct cgroup_subsys_state *pos_css;
 703        u64 sum = 0;
 704
 705        lockdep_assert_held(blkg->q->queue_lock);
 706
 707        rcu_read_lock();
 708        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 709                struct blkg_stat *stat;
 710
 711                if (!pos_blkg->online)
 712                        continue;
 713
 714                if (pol)
 715                        stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 716                else
 717                        stat = (void *)blkg + off;
 718
 719                sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
 720        }
 721        rcu_read_unlock();
 722
 723        return sum;
 724}
 725EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
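
/*
 * Illustrative call (the foo names are hypothetical, and the example assumes
 * struct foo_group embeds its struct blkg_policy_data at offset zero so that
 * offsets relative to the pd and to foo_group coincide); with
 * @blkg->q->queue_lock held:
 *
 *	u64 total = blkg_stat_recursive_sum(blkg, &blkcg_policy_foo,
 *				offsetof(struct foo_group, ios_stat));
 *
 * sums the blkg_stat at that offset in @blkg's foo policy data and in the
 * policy data of all of its online descendants.
 */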
 726
 727/**
 728 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 729 * @blkg: blkg of interest
 730 * @pol: blkcg_policy which contains the blkg_rwstat
 731 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 732 *
 733 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 734 * online descendants and their aux counts.  The caller must be holding the
 735 * queue lock for online tests.
 736 *
 737 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 738 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 739 */
 740struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 741                                             struct blkcg_policy *pol, int off)
 742{
 743        struct blkcg_gq *pos_blkg;
 744        struct cgroup_subsys_state *pos_css;
 745        struct blkg_rwstat sum = { };
 746        int i;
 747
 748        lockdep_assert_held(blkg->q->queue_lock);
 749
 750        rcu_read_lock();
 751        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 752                struct blkg_rwstat *rwstat;
 753
 754                if (!pos_blkg->online)
 755                        continue;
 756
 757                if (pol)
 758                        rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
 759                else
 760                        rwstat = (void *)pos_blkg + off;
 761
 762                for (i = 0; i < BLKG_RWSTAT_NR; i++)
 763                        atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
 764                                percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
 765                                &sum.aux_cnt[i]);
 766        }
 767        rcu_read_unlock();
 768
 769        return sum;
 770}
 771EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
 772
 773/**
 774 * blkg_conf_prep - parse and prepare for per-blkg config update
 775 * @blkcg: target block cgroup
 776 * @pol: target policy
 777 * @input: input string
 778 * @ctx: blkg_conf_ctx to be filled
 779 *
 780 * Parse per-blkg config update from @input and initialize @ctx with the
 781 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 782 * part of @input following MAJ:MIN.  This function returns with RCU read
 783 * lock and queue lock held and must be paired with blkg_conf_finish().
 784 */
 785int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 786                   char *input, struct blkg_conf_ctx *ctx)
 787        __acquires(rcu) __acquires(disk->queue->queue_lock)
 788{
 789        struct gendisk *disk;
 790        struct blkcg_gq *blkg;
 791        struct module *owner;
 792        unsigned int major, minor;
 793        int key_len, part, ret;
 794        char *body;
 795
 796        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
 797                return -EINVAL;
 798
 799        body = input + key_len;
 800        if (!isspace(*body))
 801                return -EINVAL;
 802        body = skip_spaces(body);
 803
 804        disk = get_gendisk(MKDEV(major, minor), &part);
 805        if (!disk)
 806                return -ENODEV;
 807        if (part) {
 808                owner = disk->fops->owner;
 809                put_disk(disk);
 810                module_put(owner);
 811                return -ENODEV;
 812        }
 813
 814        rcu_read_lock();
 815        spin_lock_irq(disk->queue->queue_lock);
 816
 817        if (blkcg_policy_enabled(disk->queue, pol))
 818                blkg = blkg_lookup_create(blkcg, disk->queue);
 819        else
 820                blkg = ERR_PTR(-EOPNOTSUPP);
 821
 822        if (IS_ERR(blkg)) {
 823                ret = PTR_ERR(blkg);
 824                rcu_read_unlock();
 825                spin_unlock_irq(disk->queue->queue_lock);
 826                owner = disk->fops->owner;
 827                put_disk(disk);
 828                module_put(owner);
 829                /*
 830                 * If the queue was bypassing, we should retry.  Do so after
 831                 * a short msleep().  It isn't strictly necessary but the
 832                 * queue can be bypassing for some time and it's always nice to
 833                 * avoid busy looping.
 834                 */
 835                if (ret == -EBUSY) {
 836                        msleep(10);
 837                        ret = restart_syscall();
 838                }
 839                return ret;
 840        }
 841
 842        ctx->disk = disk;
 843        ctx->blkg = blkg;
 844        ctx->body = body;
 845        return 0;
 846}
 847EXPORT_SYMBOL_GPL(blkg_conf_prep);
 848
 849/**
 850 * blkg_conf_finish - finish up per-blkg config update
 851 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 852 *
 853 * Finish up after per-blkg config update.  This function must be paired
 854 * with blkg_conf_prep().
 855 */
 856void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 857        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 858{
 859        struct module *owner;
 860
 861        spin_unlock_irq(ctx->disk->queue->queue_lock);
 862        rcu_read_unlock();
 863        owner = ctx->disk->fops->owner;
 864        put_disk(ctx->disk);
 865        module_put(owner);
 866}
 867EXPORT_SYMBOL_GPL(blkg_conf_finish);
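
/*
 * A hedged sketch of the usual prep/finish pairing in a policy's cgroup
 * file write handler (foo_update_limit() and blkcg_policy_foo are
 * hypothetical):
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 limit;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		ret = -EINVAL;
 *		if (sscanf(ctx.body, "%llu", &limit) == 1)
 *			ret = foo_update_limit(ctx.blkg, limit);
 *
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */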
 868
 869static int blkcg_print_stat(struct seq_file *sf, void *v)
 870{
 871        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
 872        struct blkcg_gq *blkg;
 873
 874        rcu_read_lock();
 875
 876        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 877                const char *dname;
 878                struct blkg_rwstat rwstat;
 879                u64 rbytes, wbytes, rios, wios;
 880
 881                dname = blkg_dev_name(blkg);
 882                if (!dname)
 883                        continue;
 884
 885                spin_lock_irq(blkg->q->queue_lock);
 886
 887                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 888                                        offsetof(struct blkcg_gq, stat_bytes));
 889                rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
 890                wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 891
 892                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 893                                        offsetof(struct blkcg_gq, stat_ios));
 894                rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
 895                wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 896
 897                spin_unlock_irq(blkg->q->queue_lock);
 898
 899                if (rbytes || wbytes || rios || wios)
 900                        seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
 901                                   dname, rbytes, wbytes, rios, wios);
 902        }
 903
 904        rcu_read_unlock();
 905        return 0;
 906}
 907
 908static struct cftype blkcg_files[] = {
 909        {
 910                .name = "stat",
 911                .flags = CFTYPE_NOT_ON_ROOT,
 912                .seq_show = blkcg_print_stat,
 913        },
 914        { }     /* terminate */
 915};
 916
 917static struct cftype blkcg_legacy_files[] = {
 918        {
 919                .name = "reset_stats",
 920                .write_u64 = blkcg_reset_stats,
 921        },
 922        { }     /* terminate */
 923};
 924
 925/**
 926 * blkcg_css_offline - cgroup css_offline callback
 927 * @css: css of interest
 928 *
 929 * This function is called when @css is about to go away and is responsible
 930 * for shooting down all blkgs associated with @css.  blkgs should be
 931 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 932 * inside q lock, this function performs reverse double lock dancing.
 933 *
 934 * This is the blkcg counterpart of ioc_release_fn().
 935 */
 936static void blkcg_css_offline(struct cgroup_subsys_state *css)
 937{
 938        struct blkcg *blkcg = css_to_blkcg(css);
 939
 940        spin_lock_irq(&blkcg->lock);
 941
 942        while (!hlist_empty(&blkcg->blkg_list)) {
 943                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 944                                                struct blkcg_gq, blkcg_node);
 945                struct request_queue *q = blkg->q;
 946
 947                if (spin_trylock(q->queue_lock)) {
 948                        blkg_destroy(blkg);
 949                        spin_unlock(q->queue_lock);
 950                } else {
 951                        spin_unlock_irq(&blkcg->lock);
 952                        cpu_relax();
 953                        spin_lock_irq(&blkcg->lock);
 954                }
 955        }
 956
 957        spin_unlock_irq(&blkcg->lock);
 958
 959        wb_blkcg_offline(blkcg);
 960}
 961
 962static void blkcg_css_free(struct cgroup_subsys_state *css)
 963{
 964        struct blkcg *blkcg = css_to_blkcg(css);
 965        int i;
 966
 967        mutex_lock(&blkcg_pol_mutex);
 968
 969        list_del(&blkcg->all_blkcgs_node);
 970
 971        for (i = 0; i < BLKCG_MAX_POLS; i++)
 972                if (blkcg->cpd[i])
 973                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
 974
 975        mutex_unlock(&blkcg_pol_mutex);
 976
 977        kfree(blkcg);
 978}
 979
 980static struct cgroup_subsys_state *
 981blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 982{
 983        struct blkcg *blkcg;
 984        struct cgroup_subsys_state *ret;
 985        int i;
 986
 987        mutex_lock(&blkcg_pol_mutex);
 988
 989        if (!parent_css) {
 990                blkcg = &blkcg_root;
 991        } else {
 992                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 993                if (!blkcg) {
 994                        ret = ERR_PTR(-ENOMEM);
 995                        goto free_blkcg;
 996                }
 997        }
 998
 999        for (i = 0; i < BLKCG_MAX_POLS; i++) {
1000                struct blkcg_policy *pol = blkcg_policy[i];
1001                struct blkcg_policy_data *cpd;
1002
1003                /*
1004                 * If the policy hasn't been registered yet, there is
1005                 * nothing to set up for it here, so skip it.  Otherwise,
1006                 * check if the policy requires any specific per-cgroup
1007                 * data: if it does, allocate and initialize it.
1008                 */
1009                if (!pol || !pol->cpd_alloc_fn)
1010                        continue;
1011
1012                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1013                if (!cpd) {
1014                        ret = ERR_PTR(-ENOMEM);
1015                        goto free_pd_blkcg;
1016                }
1017                blkcg->cpd[i] = cpd;
1018                cpd->blkcg = blkcg;
1019                cpd->plid = i;
1020                if (pol->cpd_init_fn)
1021                        pol->cpd_init_fn(cpd);
1022        }
1023
1024        spin_lock_init(&blkcg->lock);
1025        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
1026        INIT_HLIST_HEAD(&blkcg->blkg_list);
1027#ifdef CONFIG_CGROUP_WRITEBACK
1028        INIT_LIST_HEAD(&blkcg->cgwb_list);
1029#endif
1030        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1031
1032        mutex_unlock(&blkcg_pol_mutex);
1033        return &blkcg->css;
1034
1035free_pd_blkcg:
1036        for (i--; i >= 0; i--)
1037                if (blkcg->cpd[i])
1038                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1039free_blkcg:
1040        kfree(blkcg);
1041        mutex_unlock(&blkcg_pol_mutex);
1042        return ret;
1043}
1044
1045/**
1046 * blkcg_init_queue - initialize blkcg part of request queue
1047 * @q: request_queue to initialize
1048 *
1049 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1050 * part of new request_queue @q.
1051 *
1052 * RETURNS:
1053 * 0 on success, -errno on failure.
1054 */
1055int blkcg_init_queue(struct request_queue *q)
1056{
1057        struct blkcg_gq *new_blkg, *blkg;
1058        bool preloaded;
1059        int ret;
1060
1061        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1062        if (!new_blkg)
1063                return -ENOMEM;
1064
1065        preloaded = !radix_tree_preload(GFP_KERNEL);
1066
1067        /*
1068         * Make sure the root blkg exists.  As @q is bypassing at this
1069         * point, blkg_lookup_create() can't be used.  Open code the
1070         * insertion.
1071         */
1072        rcu_read_lock();
1073        spin_lock_irq(q->queue_lock);
1074        blkg = blkg_create(&blkcg_root, q, new_blkg);
1075        spin_unlock_irq(q->queue_lock);
1076        rcu_read_unlock();
1077
1078        if (preloaded)
1079                radix_tree_preload_end();
1080
1081        if (IS_ERR(blkg)) {
1082                blkg_free(new_blkg);
1083                return PTR_ERR(blkg);
1084        }
1085
1086        q->root_blkg = blkg;
1087        q->root_rl.blkg = blkg;
1088
1089        ret = blk_throtl_init(q);
1090        if (ret) {
1091                spin_lock_irq(q->queue_lock);
1092                blkg_destroy_all(q);
1093                spin_unlock_irq(q->queue_lock);
1094        }
1095        return ret;
1096}
1097
1098/**
1099 * blkcg_drain_queue - drain blkcg part of request_queue
1100 * @q: request_queue to drain
1101 *
1102 * Called from blk_drain_queue().  Responsible for draining blkcg part.
1103 */
1104void blkcg_drain_queue(struct request_queue *q)
1105{
1106        lockdep_assert_held(q->queue_lock);
1107
1108        /*
1109         * @q could be exiting and already have destroyed all blkgs as
1110         * indicated by NULL root_blkg.  If so, don't confuse policies.
1111         */
1112        if (!q->root_blkg)
1113                return;
1114
1115        blk_throtl_drain(q);
1116}
1117
1118/**
1119 * blkcg_exit_queue - exit and release blkcg part of request_queue
1120 * @q: request_queue being released
1121 *
1122 * Called from blk_release_queue().  Responsible for exiting blkcg part.
1123 */
1124void blkcg_exit_queue(struct request_queue *q)
1125{
1126        spin_lock_irq(q->queue_lock);
1127        blkg_destroy_all(q);
1128        spin_unlock_irq(q->queue_lock);
1129
1130        blk_throtl_exit(q);
1131}
1132
1133/*
1134 * We cannot support shared io contexts, as we have no means to support
1135 * two tasks with the same ioc in two different groups without major rework
1136 * of the main cic data structures.  For now we allow a task to change
1137 * its cgroup only if it's the only owner of its ioc.
1138 */
1139static int blkcg_can_attach(struct cgroup_taskset *tset)
1140{
1141        struct task_struct *task;
1142        struct cgroup_subsys_state *dst_css;
1143        struct io_context *ioc;
1144        int ret = 0;
1145
1146        /* task_lock() is needed to avoid races with exit_io_context() */
1147        cgroup_taskset_for_each(task, dst_css, tset) {
1148                task_lock(task);
1149                ioc = task->io_context;
1150                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1151                        ret = -EINVAL;
1152                task_unlock(task);
1153                if (ret)
1154                        break;
1155        }
1156        return ret;
1157}
1158
1159static void blkcg_bind(struct cgroup_subsys_state *root_css)
1160{
1161        int i;
1162
1163        mutex_lock(&blkcg_pol_mutex);
1164
1165        for (i = 0; i < BLKCG_MAX_POLS; i++) {
1166                struct blkcg_policy *pol = blkcg_policy[i];
1167                struct blkcg *blkcg;
1168
1169                if (!pol || !pol->cpd_bind_fn)
1170                        continue;
1171
1172                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1173                        if (blkcg->cpd[pol->plid])
1174                                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1175        }
1176        mutex_unlock(&blkcg_pol_mutex);
1177}
1178
1179struct cgroup_subsys io_cgrp_subsys = {
1180        .css_alloc = blkcg_css_alloc,
1181        .css_offline = blkcg_css_offline,
1182        .css_free = blkcg_css_free,
1183        .can_attach = blkcg_can_attach,
1184        .bind = blkcg_bind,
1185        .dfl_cftypes = blkcg_files,
1186        .legacy_cftypes = blkcg_legacy_files,
1187        .legacy_name = "blkio",
1188#ifdef CONFIG_MEMCG
1189        /*
1190         * This ensures that, if available, memcg is automatically enabled
1191         * together with blkcg on the default hierarchy so that the owner
1192         * cgroup can be retrieved from writeback pages.
1193         */
1194        .depends_on = 1 << memory_cgrp_id,
1195#endif
1196};
1197EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1198
1199/**
1200 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1201 * @q: request_queue of interest
1202 * @pol: blkcg policy to activate
1203 *
1204 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
1205 * bypass mode to populate its blkgs with policy_data for @pol.
1206 *
1207 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1208 * from IO path.  Update of each blkg is protected by both queue and blkcg
1209 * locks so that holding either lock and testing blkcg_policy_enabled() is
1210 * always enough for dereferencing policy data.
1211 *
1212 * The caller is responsible for synchronizing [de]activations and policy
1213 * [un]registrations.  Returns 0 on success, -errno on failure.
1214 */
1215int blkcg_activate_policy(struct request_queue *q,
1216                          const struct blkcg_policy *pol)
1217{
1218        struct blkg_policy_data *pd_prealloc = NULL;
1219        struct blkcg_gq *blkg;
1220        int ret;
1221
1222        if (blkcg_policy_enabled(q, pol))
1223                return 0;
1224
1225        blk_queue_bypass_start(q);
1226pd_prealloc:
1227        if (!pd_prealloc) {
1228                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1229                if (!pd_prealloc) {
1230                        ret = -ENOMEM;
1231                        goto out_bypass_end;
1232                }
1233        }
1234
1235        spin_lock_irq(q->queue_lock);
1236
1237        list_for_each_entry(blkg, &q->blkg_list, q_node) {
1238                struct blkg_policy_data *pd;
1239
1240                if (blkg->pd[pol->plid])
1241                        continue;
1242
1243                pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
1244                if (!pd)
1245                        swap(pd, pd_prealloc);
1246                if (!pd) {
1247                        spin_unlock_irq(q->queue_lock);
1248                        goto pd_prealloc;
1249                }
1250
1251                blkg->pd[pol->plid] = pd;
1252                pd->blkg = blkg;
1253                pd->plid = pol->plid;
1254                if (pol->pd_init_fn)
1255                        pol->pd_init_fn(pd);
1256        }
1257
1258        __set_bit(pol->plid, q->blkcg_pols);
1259        ret = 0;
1260
1261        spin_unlock_irq(q->queue_lock);
1262out_bypass_end:
1263        blk_queue_bypass_end(q);
1264        if (pd_prealloc)
1265                pol->pd_free_fn(pd_prealloc);
1266        return ret;
1267}
1268EXPORT_SYMBOL_GPL(blkcg_activate_policy);
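
/*
 * Typical usage, sketched: a policy activates itself from its per-queue
 * init path and deactivates from the matching exit path (the policy name
 * is hypothetical):
 *
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *	if (ret)
 *		return ret;
 *	...
 *	blkcg_deactivate_policy(q, &blkcg_policy_foo);
 */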
1269
1270/**
1271 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1272 * @q: request_queue of interest
1273 * @pol: blkcg policy to deactivate
1274 *
1275 * Deactivate @pol on @q.  Follows the same synchronization rules as
1276 * blkcg_activate_policy().
1277 */
1278void blkcg_deactivate_policy(struct request_queue *q,
1279                             const struct blkcg_policy *pol)
1280{
1281        struct blkcg_gq *blkg;
1282
1283        if (!blkcg_policy_enabled(q, pol))
1284                return;
1285
1286        blk_queue_bypass_start(q);
1287        spin_lock_irq(q->queue_lock);
1288
1289        __clear_bit(pol->plid, q->blkcg_pols);
1290
1291        list_for_each_entry(blkg, &q->blkg_list, q_node) {
1292                /* grab blkcg lock too while removing @pd from @blkg */
1293                spin_lock(&blkg->blkcg->lock);
1294
1295                if (blkg->pd[pol->plid]) {
1296                        if (pol->pd_offline_fn)
1297                                pol->pd_offline_fn(blkg->pd[pol->plid]);
1298                        pol->pd_free_fn(blkg->pd[pol->plid]);
1299                        blkg->pd[pol->plid] = NULL;
1300                }
1301
1302                spin_unlock(&blkg->blkcg->lock);
1303        }
1304
1305        spin_unlock_irq(q->queue_lock);
1306        blk_queue_bypass_end(q);
1307}
1308EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1309
1310/**
1311 * blkcg_policy_register - register a blkcg policy
1312 * @pol: blkcg policy to register
1313 *
1314 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1315 * successful registration.  Returns 0 on success and -errno on failure.
1316 */
1317int blkcg_policy_register(struct blkcg_policy *pol)
1318{
1319        struct blkcg *blkcg;
1320        int i, ret;
1321
1322        mutex_lock(&blkcg_pol_register_mutex);
1323        mutex_lock(&blkcg_pol_mutex);
1324
1325        /* find an empty slot */
1326        ret = -ENOSPC;
1327        for (i = 0; i < BLKCG_MAX_POLS; i++)
1328                if (!blkcg_policy[i])
1329                        break;
1330        if (i >= BLKCG_MAX_POLS)
1331                goto err_unlock;
1332
1333        /* register @pol */
1334        pol->plid = i;
1335        blkcg_policy[pol->plid] = pol;
1336
1337        /* allocate and install cpd's */
1338        if (pol->cpd_alloc_fn) {
1339                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1340                        struct blkcg_policy_data *cpd;
1341
1342                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1343                        if (!cpd)
1344                                goto err_free_cpds;
1345
1346                        blkcg->cpd[pol->plid] = cpd;
1347                        cpd->blkcg = blkcg;
1348                        cpd->plid = pol->plid;
1349                        pol->cpd_init_fn(cpd);
1350                }
1351        }
1352
1353        mutex_unlock(&blkcg_pol_mutex);
1354
1355        /* everything is in place, add intf files for the new policy */
1356        if (pol->dfl_cftypes)
1357                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1358                                               pol->dfl_cftypes));
1359        if (pol->legacy_cftypes)
1360                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1361                                                  pol->legacy_cftypes));
1362        mutex_unlock(&blkcg_pol_register_mutex);
1363        return 0;
1364
1365err_free_cpds:
1366        if (pol->cpd_alloc_fn) {
1367                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1368                        if (blkcg->cpd[pol->plid]) {
1369                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1370                                blkcg->cpd[pol->plid] = NULL;
1371                        }
1372                }
1373        }
1374        blkcg_policy[pol->plid] = NULL;
1375err_unlock:
1376        mutex_unlock(&blkcg_pol_mutex);
1377        mutex_unlock(&blkcg_pol_register_mutex);
1378        return ret;
1379}
1380EXPORT_SYMBOL_GPL(blkcg_policy_register);
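
/*
 * A minimal registration sketch (all foo_* symbols are hypothetical):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 *
 * blkcg_policy_register() assigns ->plid, so the policy struct must remain
 * writable while registered.
 */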
1381
1382/**
1383 * blkcg_policy_unregister - unregister a blkcg policy
1384 * @pol: blkcg policy to unregister
1385 *
1386 * Undo blkcg_policy_register(@pol).  Might sleep.
1387 */
1388void blkcg_policy_unregister(struct blkcg_policy *pol)
1389{
1390        struct blkcg *blkcg;
1391
1392        mutex_lock(&blkcg_pol_register_mutex);
1393
1394        if (WARN_ON(blkcg_policy[pol->plid] != pol))
1395                goto out_unlock;
1396
1397        /* kill the intf files first */
1398        if (pol->dfl_cftypes)
1399                cgroup_rm_cftypes(pol->dfl_cftypes);
1400        if (pol->legacy_cftypes)
1401                cgroup_rm_cftypes(pol->legacy_cftypes);
1402
1403        /* remove cpds and unregister */
1404        mutex_lock(&blkcg_pol_mutex);
1405
1406        if (pol->cpd_alloc_fn) {
1407                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1408                        if (blkcg->cpd[pol->plid]) {
1409                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1410                                blkcg->cpd[pol->plid] = NULL;
1411                        }
1412                }
1413        }
1414        blkcg_policy[pol->plid] = NULL;
1415
1416        mutex_unlock(&blkcg_pol_mutex);
1417out_unlock:
1418        mutex_unlock(&blkcg_pol_register_mutex);
1419}
1420EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1421