linux/include/linux/blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq                 *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};
/*
 * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
 * recursive ones.  It is used to carry stats of dead children and, for
 * blkg_rwstat, to carry result values from read and sum operations.
 */
struct blkg_stat {
        struct percpu_counter           cpu_cnt;
        atomic64_t                      aux_cnt;
};

struct blkg_rwstat {
        struct percpu_counter           cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};

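/*
 * Illustrative sketch (not part of this header): a hypothetical "mypol"
 * policy embeds blkg_policy_data at the start of its own per-blkg
 * structure and converts back with container_of():
 *
 *	struct mypol_pd {
 *		struct blkg_policy_data pd;	(must be first)
 *		u64 nr_dispatched;
 *	};
 *
 *	static struct mypol_pd *pd_to_mypol(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct mypol_pd, pd) : NULL;
 *	}
 */
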
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};

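/*
 * Sketch (hypothetical "mypol" again): per-blkcg data follows the same
 * embedding pattern as per-blkg data:
 *
 *	struct mypol_cpd {
 *		struct blkcg_policy_data cpd;	(must be first)
 *		unsigned int weight;
 *	};
 *
 *	static struct mypol_cpd *cpd_to_mypol(struct blkcg_policy_data *cpd)
 *	{
 *		return cpd ? container_of(cpd, struct mypol_cpd, cpd) : NULL;
 *	}
 */
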
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested  *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to their parent */
        struct blkcg_gq                 *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list             rl;

        /* reference count */
        atomic_t                        refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_rwstat              stat_bytes;
        struct blkg_rwstat              stat_ios;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
};

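/*
 * Sketch of a minimal policy definition and registration (hypothetical
 * "mypol"; plid is assigned by blkcg_policy_register()):
 *
 *	static struct blkg_policy_data *mypol_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct mypol_pd *mpd = kzalloc_node(sizeof(*mpd), gfp, node);
 *
 *		return mpd ? &mpd->pd : NULL;
 *	}
 *
 *	static void mypol_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_mypol(pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_mypol = {
 *		.pd_alloc_fn	= mypol_pd_alloc,
 *		.pd_free_fn	= mypol_pd_free,
 *	};
 *
 * Registered once at init time with blkcg_policy_register() and enabled
 * per-queue with blkcg_activate_policy().
 */
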
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);


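/*
 * Typical use from a policy's cgroup file write handler (sketch; error
 * handling trimmed, "blkcg_policy_mypol" is hypothetical):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_mypol, buf, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	(parse ctx.body and apply to blkg_to_pd(ctx.blkg, &blkcg_policy_mypol))
 *
 *	blkg_conf_finish(&ctx);
 */
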
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update the lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Look up the blkg for the @blkcg - @q pair.  This function should be
 * called under the RCU read lock and is guaranteed to return %NULL if @q
 * is bypassing - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}

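/*
 * Typical lookup pattern (sketch):
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		(use @blkg; take a reference with blkg_get() to keep it
 *		 past the RCU read section)
 *	rcu_read_unlock();
 */
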
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        memmove(buf, p, buf + buflen - p);
        return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)           \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)   \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),    \
                                              (p_blkg)->q, false)))

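/*
 * Example walk (sketch; caller holds rcu_read_lock(), plus the blkcg or
 * queue lock if it needs the online-only guarantee):
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg) {
 *		(visit @pos, e.g. reset its per-policy state)
 *	}
 */
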
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

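/*
 * Every blk_get_rl() must eventually be paired with a blk_put_rl() on
 * the same request_list, both under queue_lock (sketch):
 *
 *	spin_lock_irq(q->queue_lock);
 *	rl = blk_get_rl(q, bio);
 *	(allocate a request from @rl; the reference is dropped when the
 *	 request is freed)
 *	blk_put_rl(rl);
 *	spin_unlock_irq(q->queue_lock);
 */
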
/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

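/*
 * Example: visit every request_list of @q (sketch, queue_lock held):
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		(e.g. wake up waiters on @rl)
 */
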
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        __percpu_counter_add(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}

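/*
 * blkg_stat life cycle (sketch):
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, nbytes);	(hot path, per-cpu)
 *	total = blkg_stat_read(&st);	(slow path, sums all CPUs)
 *	blkg_stat_exit(&st);
 */
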
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        struct percpu_counter *cnt;

        if (rw & REQ_WRITE)
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);

        if (rw & REQ_SYNC)
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
}

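/*
 * Each call bumps exactly two counters: one for direction and one for
 * synchrony.  E.g. for a synchronous write (sketch):
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, REQ_WRITE | REQ_SYNC,
 *			bio->bi_iter.bi_size);
 *
 * increments both the WRITE and the SYNC byte counts.
 */
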
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat result;
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&result.aux_cnt[i],
                             percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
        return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(atomic64_read(&v.aux_cnt[i]) +
                             atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
                                bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
        }

        rcu_read_unlock();
        return !throtl;
}

#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */