linux/kernel/cgroup/cgroup-internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
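
/*
 * Illustrative sketch (not part of the upstream header): traversing the
 * M:N links in both directions.  css_set_lock must be held throughout;
 * process_cset() and process_cgrp() are hypothetical helpers.
 *
 * Every css_set associated with @cgrp:
 *
 *	struct cgrp_cset_link *link;
 *
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		process_cset(link->cset);
 *
 * Every cgroup associated with @cset:
 *
 *	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
 *		process_cgrp(link->cgrp);
 */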

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the number of tasks in the set */
	int			nr_tasks;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
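
/*
 * Illustrative sketch (not in the upstream header): a controller's
 * ->attach() callback consumes a cgroup_taskset via
 * cgroup_taskset_for_each() from <linux/cgroup.h>, which drives the
 * ->csets/->cur_cset/->cur_task cursors above.  apply_limits() is a
 * hypothetical helper.
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			apply_limits(css, task);
 *	}
 */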

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets.  Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)						\
{										\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets			= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)							\
{										\
	LIST_HEAD_INIT(name.preloaded_src_csets),				\
	LIST_HEAD_INIT(name.preloaded_dst_csets),				\
	CGROUP_TASKSET_INIT(name.tset),						\
}

#define DEFINE_CGROUP_MGCTX(name)						\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)

struct cgroup_sb_opts {
	u16 subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
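
/*
 * Example (illustrative): enumerating every compiled-in subsystem and
 * checking whether it is enabled on the current boot.
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		if (cgroup_ssid_enabled(ssid))
 *			pr_debug("subsys %s enabled\n", ss->name);
 */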

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but for the
	 * css_set_lock spinlock.
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
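
/*
 * Example pairing (illustrative, not in the upstream header): the lookup
 * is protected by css_set_lock while the reference is taken; the
 * reference can then be dropped later from any context via put_css_set().
 *
 *	struct css_set *cset;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cset = task_css_set(task);
 *	get_css_set(cset);
 *	spin_unlock_irq(&css_set_lock);
 *	...
 *	put_css_set(cset);
 */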

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);
bool cgroup_is_thread_root(struct cgroup *cgrp);
bool cgroup_is_threaded(struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);
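
/*
 * Typical pattern for cgroup_kn_lock_live()/cgroup_kn_unlock() above
 * (sketch, modeled on callers in cgroup.c): kernfs method implementations
 * pin and lock a live cgroup, bail out if it has already been removed,
 * and unlock through the same kernfs node.
 *
 *	cgrp = cgroup_kn_lock_live(kn, false);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on @cgrp under cgroup_mutex ...
 *	cgroup_kn_unlock(kn);
 */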

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
			       struct cgroup_root *root, unsigned long magic,
			       struct cgroup_namespace *ns);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);
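
/*
 * Typical migration flow (sketch, modeled on cgroup_attach_task() in
 * cgroup.c, single-task case): preload source csets under css_set_lock,
 * prepare matching destination csets, commit, then clean up regardless
 * of outcome.
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(leader), dst_cgrp, &mgctx);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */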

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
	__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task)
	__releases(&cgroup_threadgroup_rwsem);
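
/*
 * Sketch of a cgroup.procs write handler built on the pair above (modeled
 * on cgroup_procs_write() in cgroup.c): resolve the target task while
 * acquiring cgroup_threadgroup_rwsem, migrate, then release.
 *
 *	task = cgroup_procs_write_start(buf, true);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	ret = cgroup_attach_task(dst_cgrp, task, true);
 *	cgroup_procs_write_finish(task);
 */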

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns);

#endif /* __CGROUP_INTERNAL_H */