linux/include/linux/blk-cgroup.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX
#define FC_APPID_LEN            129

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
        BLKG_IOSTAT_READ,
        BLKG_IOSTAT_WRITE,
        BLKG_IOSTAT_DISCARD,

        BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;
        refcount_t                      online_pin;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
        char                            fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};

struct blkg_iostat {
        u64                             bytes[BLKG_IOSTAT_NR];
        u64                             ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
        struct u64_stats_sync           sync;
        struct blkg_iostat              cur;
        struct blkg_iostat              last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};
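
/*
 * Illustrative sketch, not part of the kernel API: a policy typically embeds
 * blkg_policy_data (or blkcg_policy_data) at the head of its own structure
 * and converts back with container_of().  The names below (example_blkg_pd,
 * pd_to_example) are hypothetical.
 */
struct example_blkg_pd {
        struct blkg_policy_data         pd;     /* must be the first member */
        u64                             ios_seen;
};

static inline struct example_blkg_pd *pd_to_example(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct example_blkg_pd, pd) : NULL;
}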

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* reference count */
        struct percpu_ref               refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_iostat_set __percpu *iostat_cpu;
        struct blkg_iostat_set          iostat;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
        struct work_struct              async_bio_work;

        atomic_t                        use_delay;
        atomic64_t                      delay_nsec;
        atomic64_t                      delay_start;
        u64                             last_delay;
        int                             last_use;

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
                                struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
                                      size_t size);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
        blkcg_pol_stat_pd_fn            *pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
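
/*
 * Illustrative sketch, hypothetical names throughout: a policy fills in a
 * struct blkcg_policy, registers it once at init time and is then activated
 * per request_queue with blkcg_activate_policy().  Only pd_alloc_fn() and
 * pd_free_fn() are shown; a real policy would allocate and free its
 * container structure in them.
 */
static inline struct blkg_policy_data *example_pd_alloc(gfp_t gfp,
                                struct request_queue *q, struct blkcg *blkcg)
{
        /* allocate the policy's per-blkg structure here (NULL == -ENOMEM) */
        return NULL;
}

static inline void example_pd_free(struct blkg_policy_data *pd)
{
        /* free whatever example_pd_alloc() allocated */
}

static struct blkcg_policy blkcg_policy_example __maybe_unused = {
        .pd_alloc_fn    = example_pd_alloc,
        .pd_free_fn     = example_pd_free,
};

static inline int example_policy_init(void)
{
        /* typically done from the policy's module init path */
        return blkcg_policy_register(&blkcg_policy_example);
}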

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
        struct block_device             *bdev;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

struct block_device *blkcg_conf_open_bdev(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
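
/*
 * Illustrative sketch (example_conf_write is hypothetical): the usual shape
 * of a policy's cgroup file write handler.  blkg_conf_prep() parses the
 * "MAJ:MIN body" style input and, on success, sets up @ctx, which must be
 * released again with blkg_conf_finish().
 */
static inline int example_conf_write(struct blkcg *blkcg,
                                     const struct blkcg_policy *pol, char *input)
{
        struct blkg_conf_ctx ctx;
        int ret;

        ret = blkg_conf_prep(blkcg, pol, input, &ctx);
        if (ret)
                return ret;

        /* ctx.blkg and ctx.body are valid here; apply the new configuration */

        blkg_conf_finish(&ctx);
        return 0;
}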

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
        struct cgroup_subsys_state *css;

        css = kthread_blkcg();
        if (css)
                return css;
        return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return NULL;
}

static inline bool blk_cgroup_congested(void)
{
        struct cgroup_subsys_state *css;
        bool ret = false;

        rcu_read_lock();
        css = kthread_blkcg();
        if (!css)
                css = task_css(current, io_cgrp_id);
        while (css) {
                if (atomic_read(&css->cgroup->congestion_count)) {
                        ret = true;
                        break;
                }
                css = css->parent;
        }
        rcu_read_unlock();
        return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and, if it returns true, run with the root blkg for that queue and do any
 * backcharging to the originating cgroup once the io is complete.
 *
 * Return: %true if this bio needs to be submitted with the root blkg context.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
        return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
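
/*
 * Illustrative sketch (example_issue_blkg is hypothetical): pick the blkg to
 * issue against, falling back to the queue's root blkg for REQ_META/REQ_SWAP
 * bios so the cost can be backcharged to bio->bi_blkg once the IO completes.
 */
static inline struct blkcg_gq *example_issue_blkg(struct bio *bio)
{
        struct blkcg_gq *blkg = bio->bi_blkg;

        if (blkg && bio_issue_as_root_blkg(bio))
                return blkg->q->root_blkg;
        return blkg;
}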

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up the blkg for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return __blkg_lookup(blkcg, q, false);
}
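
/*
 * Illustrative sketch (example_blkg_exists is hypothetical): blkg_lookup()
 * must be called under the RCU read lock, and the returned blkg is only
 * stable within that RCU section unless a reference is taken (see
 * blkg_tryget() below).
 */
static inline bool example_blkg_exists(struct blkcg *blkcg,
                                       struct request_queue *q)
{
        bool found;

        rcu_read_lock();
        found = blkg_lookup(blkcg, q) != NULL;
        rcu_read_unlock();

        return found;
}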

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
        return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
        refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
        do {
                if (!refcount_dec_and_test(&blkcg->online_pin))
                        break;
                blkcg_destroy_blkgs(blkcg);
                blkcg = blkcg_parent(blkcg);
        } while (blkcg);
}
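
/*
 * Illustrative sketch (hypothetical helpers): cgwb-style pairing of the
 * pin/unpin helpers above, keeping the blkcg online while a writeback
 * structure still references it.
 */
static inline void example_writeback_attach(struct blkcg *blkcg)
{
        blkcg_pin_online(blkcg);
}

static inline void example_writeback_detach(struct blkcg *blkcg)
{
        blkcg_unpin_online(blkcg);      /* may cascade blkcg_destroy_blkgs() */
}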

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
        return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        percpu_ref_put(&blkg->refcnt);
}
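
/*
 * Illustrative sketch (example_get_blkg is hypothetical): look a blkg up
 * under RCU and take a reference that outlives the RCU section.  The caller
 * must drop it with blkg_put() when done.
 */
static inline struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
                                                struct request_queue *q)
{
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkg = blkg_lookup(blkcg, q);
        if (!blkg_tryget(blkg))
                blkg = NULL;
        rcu_read_unlock();

        return blkg;
}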

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)           \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
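
/*
 * Illustrative sketch (example_nr_online_blkgs is hypothetical): count the
 * online blkgs in @parent's subtree using the pre-order walk above.
 */
static inline unsigned int example_nr_online_blkgs(struct blkcg_gq *parent)
{
        struct cgroup_subsys_state *pos_css;
        struct blkcg_gq *blkg;
        unsigned int nr = 0;

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css, parent)
                if (blkg->online)
                        nr++;
        rcu_read_unlock();

        return nr;
}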

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
        if (bio->bi_opf & REQ_CGROUP_PUNT)
                return __blkcg_punt_bio_submit(bio);
        else
                return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
        if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
                return;
        if (atomic_add_return(1, &blkg->use_delay) == 1)
                atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        if (WARN_ON_ONCE(old < 0))
                return 0;
        if (old == 0)
                return 0;

        /*
         * We do this song and dance because we can race with somebody else
         * adding or removing delay.  If we just did an atomic_dec we'd end up
         * negative and we'd already be in trouble.  We need to subtract 1 and
         * then check to see if we were the last delay so we can drop the
         * congestion count on the cgroup.
         */
        while (old) {
                int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
                if (cur == old)
                        break;
                old = cur;
        }

        if (old == 0)
                return 0;
        if (old == 1)
                atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
        return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
        int old = atomic_read(&blkg->use_delay);

        /* We only want 1 person setting the congestion count for this blkg. */
        if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
                atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

        atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        /* We only want 1 person clearing the congestion count for this blkg. */
        if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
                atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
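
/*
 * Illustrative sketch (example_charge_delay is hypothetical): how a
 * throttling policy might combine the delay helpers above.  blkcg_use_delay()
 * marks the blkg (and its cgroup) congested and blkcg_add_delay() accrues the
 * penalty; a matching blkcg_unuse_delay() is expected once the pressure is
 * gone.  Do not mix this with blkcg_set_delay()/blkcg_clear_delay().
 */
static inline void example_charge_delay(struct blkcg_gq *blkg, u64 now,
                                        u64 penalty_nsec)
{
        blkcg_use_delay(blkg);
        blkcg_add_delay(blkg, now, penalty_nsec);
}
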
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */

#ifdef CONFIG_BLK_CGROUP_FC_APPID
/*
 * Sets the fc_app_id field associated with the blkcg
 * @app_id: application identifier
 * @cgrp_id: cgroup id
 * @app_id_len: size of application identifier
 */
static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
{
        struct cgroup *cgrp;
        struct cgroup_subsys_state *css;
        struct blkcg *blkcg;
        int ret = 0;

        if (app_id_len > FC_APPID_LEN)
                return -EINVAL;

        cgrp = cgroup_get_from_id(cgrp_id);
        if (!cgrp)
                return -ENOENT;
        css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
        if (!css) {
                ret = -ENOENT;
                goto out_cgrp_put;
        }
        blkcg = css_to_blkcg(css);
        /*
         * There is a slight race condition on setting the appid.
         * Worst case, an I/O may not find the right id.
         * This is no different from the I/O we let pass while obtaining
         * the vmid from the fabric.
         * Adding the overhead of a lock is not necessary.
         */
        strlcpy(blkcg->fc_app_id, app_id, app_id_len);
        css_put(css);
out_cgrp_put:
        cgroup_put(cgrp);
        return ret;
}

/**
 * blkcg_get_fc_appid - get the fc app identifier associated with a bio
 * @bio: target bio
 *
 * On success, returns the fc_app_id; on failure, returns %NULL.
 */
static inline char *blkcg_get_fc_appid(struct bio *bio)
{
        if (bio && bio->bi_blkg &&
                (bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
                return bio->bi_blkg->blkcg->fc_app_id;
        return NULL;
}
#else
static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
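
/*
 * Illustrative sketch (example_bio_has_fc_appid is hypothetical): an FC LLDD
 * would query the application identifier of a bio when building an outgoing
 * command.
 */
static inline bool example_bio_has_fc_appid(struct bio *bio)
{
        return blkcg_get_fc_appid(bio) != NULL;
}
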
#endif  /* _BLK_CGROUP_H */