/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
        BLKG_IOSTAT_READ,
        BLKG_IOSTAT_WRITE,
        BLKG_IOSTAT_DISCARD,

        BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;
        refcount_t                      online_pin;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};

struct blkg_iostat {
        u64                             bytes[BLKG_IOSTAT_NR];
        u64                             ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
        struct u64_stats_sync           sync;
        struct blkg_iostat              cur;
        struct blkg_iostat              last;
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};
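
/*
 * Illustrative sketch (not part of the original header): a policy typically
 * embeds blkg_policy_data at the start of its own per-blkg structure and
 * converts back with container_of().  The names my_blkg_pd and pd_to_my
 * below are hypothetical.
 *
 *        struct my_blkg_pd {
 *                struct blkg_policy_data pd;     (must come first, see above)
 *                u64                     some_counter;
 *        };
 *
 *        static inline struct my_blkg_pd *pd_to_my(struct blkg_policy_data *pd)
 *        {
 *                return pd ? container_of(pd, struct my_blkg_pd, pd) : NULL;
 *        }
 */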

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* reference count */
        struct percpu_ref               refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_iostat_set __percpu *iostat_cpu;
        struct blkg_iostat_set          iostat;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        spinlock_t                      async_bio_lock;
        struct bio_list                 async_bios;
        struct work_struct              async_bio_work;

        atomic_t                        use_delay;
        atomic64_t                      delay_nsec;
        atomic64_t                      delay_start;
        u64                             last_delay;
        int                             last_use;

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
                                struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
                                      size_t size);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
        blkcg_pol_stat_pd_fn            *pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
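
/*
 * Illustrative sketch (not part of the original header): a policy is normally
 * declared as a static struct blkcg_policy and registered once at init time;
 * it only starts receiving pd callbacks for a queue after
 * blkcg_activate_policy() has been called for that queue.  The names my_pol,
 * my_pd_alloc and my_pd_free are hypothetical.
 *
 *        static struct blkcg_policy my_pol = {
 *                .pd_alloc_fn    = my_pd_alloc,
 *                .pd_free_fn     = my_pd_free,
 *        };
 *
 *        static int __init my_init(void)
 *        {
 *                return blkcg_policy_register(&my_pol);
 *        }
 */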

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
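
/*
 * Illustrative sketch (not part of the original header): a policy's cgroup
 * file write handler typically pairs blkg_conf_prep() with
 * blkg_conf_finish(); between the two calls ctx.blkg and ctx.body can be
 * used to update the per-blkg configuration.  my_pol is a hypothetical
 * policy.
 *
 *        struct blkg_conf_ctx ctx;
 *        int ret;
 *
 *        ret = blkg_conf_prep(blkcg, &my_pol, buf, &ctx);
 *        if (ret)
 *                return ret;
 *        ...parse ctx.body and update blkg_to_pd(ctx.blkg, &my_pol)...
 *        blkg_conf_finish(&ctx);
 */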

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
        struct cgroup_subsys_state *css;

        css = kthread_blkcg();
        if (css)
                return css;
        return task_css(current, io_cgrp_id);
}
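
/*
 * Illustrative sketch (not part of the original header): because blkcg_css()
 * may return a dying css, a caller that wants to hold on to the result is
 * expected to confirm it with a tryget before use, e.g.:
 *
 *        rcu_read_lock();
 *        css = blkcg_css();
 *        if (css_tryget_online(css)) {
 *                ...use css...
 *                css_put(css);
 *        }
 *        rcu_read_unlock();
 */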

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_blkg)
                return bio->bi_blkg->blkcg;
        return NULL;
}

static inline bool blk_cgroup_congested(void)
{
        struct cgroup_subsys_state *css;
        bool ret = false;

        rcu_read_lock();
        css = kthread_blkcg();
        if (!css)
                css = task_css(current, io_cgrp_id);
        while (css) {
                if (atomic_read(&css->cgroup->congestion_count)) {
                        ret = true;
                        break;
                }
                css = css->parent;
        }
        rcu_read_unlock();
        return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: target bio
 *
 * Returns %true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper and
 * if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
        return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return __blkg_lookup(blkcg, q, false);
}
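
/*
 * Illustrative sketch (not part of the original header): the returned blkg is
 * only guaranteed to stay around for the duration of the RCU read-side
 * critical section unless a reference is taken (see blkg_tryget() below).
 *
 *        rcu_read_lock();
 *        blkg = blkg_lookup(blkcg, q);
 *        if (blkg)
 *                ...use blkg under RCU...
 *        rcu_read_unlock();
 */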

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level. See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
        return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
        refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
        do {
                if (!refcount_dec_and_test(&blkcg->online_pin))
                        break;
                blkcg_destroy_blkgs(blkcg);
                blkcg = blkcg_parent(blkcg);
        } while (blkcg);
}
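
/*
 * Illustrative sketch (not part of the original header): pin/unpin calls are
 * expected to be balanced; e.g. a cgwb pins the owning blkcg while it is
 * active and unpins it on release, which may then let blkcg destruction
 * continue:
 *
 *        blkcg_pin_online(blkcg);
 *        ...cgwb is live...
 *        blkcg_unpin_online(blkcg);
 */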

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
 * of freeing this blkg, so we can only use it if the refcnt is not zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
        return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        percpu_ref_put(&blkg->refcnt);
}
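
/*
 * Illustrative sketch (not part of the original header): a common pattern is
 * an RCU lookup followed by blkg_tryget() so the blkg can be used beyond the
 * RCU read-side critical section and released with blkg_put():
 *
 *        rcu_read_lock();
 *        blkg = blkg_lookup(blkcg, q);
 *        if (blkg && !blkg_tryget(blkg))
 *                blkg = NULL;
 *        rcu_read_unlock();
 *
 *        if (blkg) {
 *                ...use blkg outside of RCU...
 *                blkg_put(blkg);
 *        }
 */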

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)           \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))
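
/*
 * Illustrative sketch (not part of the original header): walking all blkgs
 * below the root blkg of a queue under RCU:
 *
 *        struct cgroup_subsys_state *pos_css;
 *        struct blkcg_gq *blkg;
 *
 *        rcu_read_lock();
 *        blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg)
 *                ...visit blkg...
 *        rcu_read_unlock();
 */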

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
        if (bio->bi_opf & REQ_CGROUP_PUNT)
                return __blkcg_punt_bio_submit(bio);
        else
                return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
        if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
                return;
        if (atomic_add_return(1, &blkg->use_delay) == 1)
                atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        if (WARN_ON_ONCE(old < 0))
                return 0;
        if (old == 0)
                return 0;

        /*
         * We do this song and dance because we can race with somebody else
         * adding or removing delay.  If we just did an atomic_dec we'd end up
         * negative and we'd already be in trouble.  We need to subtract 1 and
         * then check to see if we were the last delay so we can drop the
         * congestion count on the cgroup.
         */
        while (old) {
                int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
                if (cur == old)
                        break;
                old = cur;
        }

        if (old == 0)
                return 0;
        if (old == 1)
                atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
        return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay(). Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
        int old = atomic_read(&blkg->use_delay);

        /* We only want 1 person setting the congestion count for this blkg. */
        if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
                atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

        atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism. See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
        int old = atomic_read(&blkg->use_delay);

        /* We only want 1 person clearing the congestion count for this blkg. */
        if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
                atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}
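
/*
 * Illustrative sketch (not part of the original header): blkcg_set_delay() and
 * blkcg_clear_delay() form one pairing, while blkcg_use_delay() and
 * blkcg_unuse_delay() (optionally combined with blkcg_add_delay()) form
 * another; per the comment on blkcg_set_delay() above, the two must not be
 * mixed on the same blkg.
 *
 *        blkcg_set_delay(blkg, delay_nsec);
 *        ...throttling is in effect, the delay is not decayed...
 *        blkcg_clear_delay(blkg);
 */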

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */