/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 *  cgroup interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN               1
#define CGROUP_WEIGHT_DFL               100
#define CGROUP_WEIGHT_MAX               10000
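
/*
 * In other words, CGROUP_WEIGHT_DFL is the geometric mean of the range:
 * 100 == sqrt(1 * 10000), so a weight left at the default can still be
 * scaled down by 100x (to CGROUP_WEIGHT_MIN) or up by 100x (to
 * CGROUP_WEIGHT_MAX).
 */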

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS             (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED          (1U << 1)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
        struct cgroup_subsys            *ss;
        unsigned int                    flags;

        struct list_head                *cset_pos;
        struct list_head                *cset_head;

        struct list_head                *tcset_pos;
        struct list_head                *tcset_head;

        struct list_head                *task_pos;
        struct list_head                *tasks_head;
        struct list_head                *mg_tasks_head;

        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
        struct task_struct              *cur_task;
        struct list_head                iters_node;     /* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)                                                              \
        extern struct static_key_true _x ## _cgrp_subsys_enabled_key;           \
        extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)                                               \
        static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)                                                \
        static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
                                         struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);
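
/*
 * Rough lifecycle (a summary only; see the callers in kernel/fork.c and
 * kernel/exit.c for the authoritative sequence): cgroup_fork() runs early
 * in copy_process(), cgroup_can_fork() may veto the fork, and then either
 * cgroup_post_fork() (success) or cgroup_cancel_fork() (failure) is
 * called.  On the way out, cgroup_exit() runs from do_exit(),
 * cgroup_release() when the task is reaped and cgroup_free() when the
 * task_struct itself is freed.
 */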

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
                                           struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
                                                    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
                                         struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
                                        struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
                         struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
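
/*
 * Example (a minimal sketch, not taken from any particular caller):
 * walking every task attached to @css with the iterator above.
 * do_something() is a placeholder; css_task_iter_end() must always be
 * called to release the iterator.
 *
 *      struct css_task_iter it;
 *      struct task_struct *task;
 *
 *      css_task_iter_start(css, 0, &it);
 *      while ((task = css_task_iter_next(&it)))
 *              do_something(task);
 *      css_task_iter_end(&it);
 *
 * Passing CSS_TASK_ITER_PROCS instead of 0 would visit only threadgroup
 * leaders, per the flag definitions above.
 */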

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)                                 \
        for ((pos) = css_next_child(NULL, (parent)); (pos);             \
             (pos) = css_next_child((pos), (parent)))
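
/*
 * Example (illustrative sketch only): counting online children of
 * @parent_css.  The walk itself only needs rcu_read_lock(); the CSS_ONLINE
 * test mirrors what css_has_online_children() does internally.
 *
 *      struct cgroup_subsys_state *child;
 *      int nr_online = 0;
 *
 *      rcu_read_lock();
 *      css_for_each_child(child, parent_css)
 *              if (child->flags & CSS_ONLINE)
 *                      nr_online++;
 *      rcu_read_unlock();
 */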

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and is the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *      Lock @css's parent and @css;
 *      Inherit state from the parent;
 *      Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *      css_for_each_descendant_pre(@pos, @css) {
 *              Lock @pos;
 *              if (@pos == @css)
 *                      Update @css's state;
 *              else
 *                      Verify @pos is alive and inherit state from its parent;
 *              Unlock @pos;
 *      }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)                           \
        for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);       \
             (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and is the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described for the
 * pre-order walk does not apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)                          \
        for ((pos) = css_next_descendant_post(NULL, (css)); (pos);      \
             (pos) = css_next_descendant_post((pos), (css)))
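
/*
 * Example (illustrative sketch only): post-order is the natural shape for
 * propagating per-css state bottom-up, since every child is visited before
 * its parent.  sum_into_parent() and per_css_value() are placeholders for
 * subsystem-specific helpers.
 *
 *      struct cgroup_subsys_state *pos;
 *
 *      rcu_read_lock();
 *      css_for_each_descendant_post(pos, root_css)
 *              if (pos->parent)
 *                      sum_into_parent(pos->parent, per_css_value(pos));
 *      rcu_read_unlock();
 */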

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)                    \
        for ((task) = cgroup_taskset_first((tset), &(dst_css));         \
             (task);                                                    \
             (task) = cgroup_taskset_next((tset), &(dst_css)))
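
/*
 * Example (a minimal sketch of the usual pattern, not any particular
 * subsystem's implementation): a cgroup_subsys ->can_attach() callback
 * typically walks the taskset and vetoes the migration by returning an
 * error.  allow_move() is a placeholder for a subsystem-specific check.
 *
 *      static int my_can_attach(struct cgroup_taskset *tset)
 *      {
 *              struct task_struct *task;
 *              struct cgroup_subsys_state *dst_css;
 *
 *              cgroup_taskset_for_each(task, dst_css, tset)
 *                      if (!allow_move(task, dst_css))
 *                              return -EINVAL;
 *              return 0;
 *      }
 */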

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)           \
        for ((leader) = cgroup_taskset_first((tset), &(dst_css));       \
             (leader);                                                  \
             (leader) = cgroup_taskset_next((tset), &(dst_css)))        \
                if ((leader) != (leader)->group_leader)                 \
                        ;                                               \
                else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget(&css->refcnt);
        return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget_live(&css->refcnt);
        return true;
}
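
/*
 * Example (illustrative sketch): pinning a css found through an
 * RCU-protected lookup so that it can be used after rcu_read_unlock().
 * lookup_css() stands in for any RCU-safe way of finding the css, e.g.
 * task_css() below.
 *
 *      struct cgroup_subsys_state *css;
 *
 *      rcu_read_lock();
 *      css = lookup_css();
 *      if (!css_tryget_online(css))
 *              css = NULL;
 *      rcu_read_unlock();
 *
 *      if (css) {
 *              use(css);
 *              css_put(css);
 *      }
 */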

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
        css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
        return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
        css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)                                   \
        rcu_dereference_check((task)->cgroups,                          \
                lockdep_is_held(&cgroup_mutex) ||                       \
                lockdep_is_held(&css_set_lock) ||                       \
                ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)                                   \
        rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)                            \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
                if (likely(css_tryget_online(css)))
                        break;
                cpu_relax();
        }
        rcu_read_unlock();
        return css;
}
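
/*
 * Example (illustrative sketch): task_get_css() is handy when the css has
 * to outlive the RCU read-side section, e.g. across a sleeping operation.
 * The subsystem id below is only an example.
 *
 *      struct cgroup_subsys_state *css;
 *
 *      css = task_get_css(current, cpu_cgrp_id);
 *      ... use css, possibly sleeping ...
 *      css_put(css);
 */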

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
        return task_css_check(task, subsys_id, true) ==
                init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
        return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *parent_css = cgrp->self.parent;

        if (parent_css)
                return container_of(parent_css, struct cgroup, self);
        return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
                                        struct cgroup *ancestor)
{
        if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
                return false;
        return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
                                             int ancestor_level)
{
        if (cgrp->level < ancestor_level)
                return NULL;
        while (cgrp && cgrp->level > ancestor_level)
                cgrp = cgroup_parent(cgrp);
        return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant of
 * @ancestor.  It follows the same rules as cgroup_is_descendant() and
 * applies only to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        struct css_set *cset = task_css_set(task);

        return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
        return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
                cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
        return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
        return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
        return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
        return of_css(seq->private);
}
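
/*
 * Example (a minimal sketch, not tied to a particular controller): a
 * cftype ->seq_show() handler can use seq_css()/seq_cft() to recover the
 * css and file definition it was invoked for.  my_css_value() is a
 * placeholder helper.
 *
 *      static int my_seq_show(struct seq_file *seq, void *v)
 *      {
 *              struct cgroup_subsys_state *css = seq_css(seq);
 *
 *              seq_printf(seq, "%llu\n",
 *                         (unsigned long long)my_css_value(css));
 *              return 0;
 *      }
 */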

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_path(cgrp->kn, buf, buflen);
}
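
/*
 * Example (illustrative sketch): formatting a cgroup's path for a debug
 * message.  Like the kernfs helper it wraps, cgroup_path() returns the
 * length of the full path (or a negative errno), so truncation can be
 * detected by comparing the result against the buffer size.
 *
 *      char buf[64];
 *      int len;
 *
 *      len = cgroup_path(cgrp, buf, sizeof(buf));
 *      if (len >= 0 && len < sizeof(buf))
 *              pr_debug("cgroup: %s\n", buf);
 */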

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
        pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
        pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
        return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
        /*
         * kthreadd is inherited by all kthreads, keep it in the root so
         * that the new kthreads are guaranteed to stay in the root until
         * initialization is finished.
         */
        current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
        /*
         * This kthread finished initialization.  The creator should have
         * set PF_NO_SETAFFINITY if this kthread should stay in the root.
         */
        current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
                                        char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
        return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
        return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
        char *buf, size_t buflen) {}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
                                         u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_charge(task, delta_exec);

        rcu_read_lock();
        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime(cgrp, delta_exec);
        rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_account_field(task, index, delta_exec);

        rcu_read_lock();
        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime_field(cgrp, index, delta_exec);
        rcu_read_unlock();
}

#else   /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec) {}

#endif  /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
        unsigned long v;

        /*
         * @skcd->val is 64bit but the following is safe on 32bit too as we
         * just need the lower ulong to be written and read atomically.
         */
        v = READ_ONCE(skcd->val);

        if (v & 1)
                return &cgrp_dfl_root.cgrp;

        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
        return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else   /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif  /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
        refcount_t              count;
        struct ns_common        ns;
        struct user_namespace   *user_ns;
        struct ucounts          *ucounts;
        struct css_set          *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
                   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
               struct cgroup_namespace *old_ns)
{
        return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns)
                refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns && refcount_dec_and_test(&ns->count))
                free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */