linux/kernel/cgroup/cgroup-v1.c
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY    HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
        return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
        struct cgroup_root *root;
        int retval = 0;

        mutex_lock(&cgroup_mutex);
        percpu_down_write(&cgroup_threadgroup_rwsem);
        for_each_root(root) {
                struct cgroup *from_cgrp;

                if (root == &cgrp_dfl_root)
                        continue;

                spin_lock_irq(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
                spin_unlock_irq(&css_set_lock);

                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
                        break;
        }
        percpu_up_write(&cgroup_threadgroup_rwsem);
        mutex_unlock(&cgroup_mutex);

        return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup.  No task
 * can slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
        DEFINE_CGROUP_MGCTX(mgctx);
        struct cgrp_cset_link *link;
        struct css_task_iter it;
        struct task_struct *task;
        int ret;

        if (cgroup_on_dfl(to))
                return -EINVAL;

        if (!cgroup_may_migrate_to(to))
                return -EBUSY;

        mutex_lock(&cgroup_mutex);

        percpu_down_write(&cgroup_threadgroup_rwsem);

        /* all tasks in @from are being moved, all csets are source */
        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &mgctx);
        spin_unlock_irq(&css_set_lock);

        ret = cgroup_migrate_prepare_dst(&mgctx);
        if (ret)
                goto out_err;

        /*
         * Migrate tasks one-by-one until @from is empty.  This fails iff
         * ->can_attach() fails.
         */
        do {
                css_task_iter_start(&from->self, &it);
                task = css_task_iter_next(&it);
                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);

                if (task) {
                        ret = cgroup_migrate(task, false, &mgctx);
                        if (!ret)
                                trace_cgroup_transfer_tasks(to, task, false);
                        put_task_struct(task);
                }
        } while (task && !ret);
out_err:
        cgroup_migrate_finish(&mgctx);
        percpu_up_write(&cgroup_threadgroup_rwsem);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
        CGROUP_FILE_PROCS,
        CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
        /*
         * used to find which pidlist is wanted. doesn't change as long as
         * this particular list stays in the list.
         */
        struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
        /* array of xids */
        pid_t *list;
        /* how many elements the above list has */
        int length;
        /* each of these stored in a list by its cgroup */
        struct list_head links;
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
        /* for delayed destruction */
        struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
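/*
 * Illustrative numbers (not from the original source): with 4 KiB pages
 * and a 4-byte pid_t, PIDLIST_TOO_LARGE() is true for counts above 2048
 * (2049 * 4 = 8196 > PAGE_SIZE * 2 = 8192), so pidlist_allocate() falls
 * back to vmalloc() for such lists; kvfree() in pidlist_free() handles
 * both allocation paths.
 */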
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
        if (PIDLIST_TOO_LARGE(count))
                return vmalloc(count * sizeof(pid_t));
        else
                return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
        kvfree(p);
}

/*
 * Used to destroy all pidlists lingering on the destroy timer.  None
 * should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
        struct cgroup_pidlist *l, *tmp_l;

        mutex_lock(&cgrp->pidlist_mutex);
        list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
        mutex_unlock(&cgrp->pidlist_mutex);

        flush_workqueue(cgroup_pidlist_destroy_wq);
        BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
                                                destroy_dwork);
        struct cgroup_pidlist *tofree = NULL;

        mutex_lock(&l->owner->pidlist_mutex);

        /*
         * Destroy iff we didn't get queued again.  The state won't change
         * as destroy_dwork can only be queued while locked.
         */
        if (!delayed_work_pending(dwork)) {
                list_del(&l->links);
                pidlist_free(l->list);
                put_pid_ns(l->key.ns);
                tofree = l;
        }

        mutex_unlock(&l->owner->pidlist_mutex);
        kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
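/*
 * Illustrative example (not from the original source): for the pre-sorted
 * input {3, 3, 5, 7, 7} with length == 5, the array is compacted in place
 * to start with {3, 5, 7} and 3 is returned; only the first 'dest'
 * entries are meaningful afterwards.
 */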
static int pidlist_uniq(pid_t *list, int length)
{
        int src, dest = 1;

        /*
         * we presume the 0th element is unique, so src starts at 1. trivial
         * edge cases first; no work needs to be done for either
         */
        if (length == 0 || length == 1)
                return length;
        /* src and dest walk down the list; dest counts unique elements */
        for (src = 1; src < length; src++) {
                /* find next unique element */
                while (list[src] == list[src-1]) {
                        src++;
                        if (src == length)
                                goto after;
                }
                /* dest always points to where the next unique element goes */
                list[dest] = list[src];
                dest++;
        }
after:
        return dest;
}

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
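/*
 * Illustrative example (not from the original source): if the same
 * cgroup's "tasks" file is read from the init pid namespace and from
 * inside a container, each reader gets its own pidlist, as the task seen
 * as pid 4321 by init may be pid 7 inside the container and the two sort
 * orders disagree.
 */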
static int cmppid(const void *a, const void *b)
{
        return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
                                                  enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;
        /* don't need task_nsproxy() if we're looking at ourself */
        struct pid_namespace *ns = task_active_pid_ns(current);

        lockdep_assert_held(&cgrp->pidlist_mutex);

        list_for_each_entry(l, &cgrp->pidlists, links)
                if (l->key.type == type && l->key.ns == ns)
                        return l;
        return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Must be called with
 * cgrp->pidlist_mutex held.  Returns NULL on allocation failure.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
                                                enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        l = cgroup_pidlist_find(cgrp, type);
        if (l)
                return l;

        /* entry not found; create a new one */
        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
        if (!l)
                return l;

        INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
        l->key.type = type;
        /* don't need task_nsproxy() if we're looking at ourself */
        l->key.ns = get_pid_ns(task_active_pid_ns(current));
        l->owner = cgrp;
        list_add(&l->links, &cgrp->pidlists);
        return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
        int count = 0;
        struct cgrp_cset_link *link;

        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &cgrp->cset_links, cset_link)
                count += link->cset->nr_tasks;
        spin_unlock_irq(&css_set_lock);
        return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                              struct cgroup_pidlist **lp)
{
        pid_t *array;
        int length;
        int pid, n = 0; /* used for populating the array */
        struct css_task_iter it;
        struct task_struct *tsk;
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        /*
         * If cgroup gets more users after we read count, we won't have
         * enough space - tough.  This race is indistinguishable to the
         * caller from the case that the additional cgroup users didn't
         * show up until sometime later on.
         */
        length = cgroup_task_count(cgrp);
        array = pidlist_allocate(length);
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
        css_task_iter_start(&cgrp->self, &it);
        while ((tsk = css_task_iter_next(&it))) {
                if (unlikely(n == length))
                        break;
                /* get tgid or pid for procs or tasks file respectively */
                if (type == CGROUP_FILE_PROCS)
                        pid = task_tgid_vnr(tsk);
                else
                        pid = task_pid_vnr(tsk);
                if (pid > 0) /* make sure to only use valid results */
                        array[n++] = pid;
        }
        css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
        if (type == CGROUP_FILE_PROCS)
                length = pidlist_uniq(array, length);

        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
                pidlist_free(array);
                return -ENOMEM;
        }

        /* store array, freeing old if necessary */
        pidlist_free(l->list);
        l->list = array;
        l->length = length;
        *lp = l;
        return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */
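/*
 * Illustrative flow (not from the original source), assuming a pidlist of
 * {10, 25, 40}: a read() that stopped after pid 10 re-enters start() with
 * *pos == 25; the binary search below finds 25 (or the first larger pid
 * if the list was rebuilt meanwhile), next() then advances to 40, and
 * stop() arms the delayed destruction of the shared pidlist.
 */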

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
        /*
         * Initially we receive a position value that corresponds to
         * one more than the last pid shown (or 0 on the first call or
         * after a seek to the start). Use a binary search to find the
         * next pid to display, if any.
         */
        struct kernfs_open_file *of = s->private;
        struct cgroup *cgrp = seq_css(s)->cgroup;
        struct cgroup_pidlist *l;
        enum cgroup_filetype type = seq_cft(s)->private;
        int index = 0, pid = *pos;
        int *iter, ret;

        mutex_lock(&cgrp->pidlist_mutex);

        /*
         * !NULL @of->priv indicates that this isn't the first start()
         * after open.  If the matching pidlist is around, we can use that.
         * Look for it.  Note that @of->priv can't be used directly.  It
         * could already have been destroyed.
         */
        if (of->priv)
                of->priv = cgroup_pidlist_find(cgrp, type);

        /*
         * Either this is the first start() after open or the matching
         * pidlist has been destroyed in between.  Create a new one.
         */
        if (!of->priv) {
                ret = pidlist_array_load(cgrp, type,
                                         (struct cgroup_pidlist **)&of->priv);
                if (ret)
                        return ERR_PTR(ret);
        }
        l = of->priv;

        if (pid) {
                int end = l->length;

                while (index < end) {
                        int mid = (index + end) / 2;
                        if (l->list[mid] == pid) {
                                index = mid;
                                break;
                        } else if (l->list[mid] <= pid)
                                index = mid + 1;
                        else
                                end = mid;
                }
        }
        /* If we're off the end of the array, we're done */
        if (index >= l->length)
                return NULL;
        /* Update the abstract position to be the actual pid that we found */
        iter = l->list + index;
        *pos = *iter;
        return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;

        if (l)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
                                 CGROUP_PIDLIST_DESTROY_DELAY);
        mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;
        pid_t *p = v;
        pid_t *end = l->list + l->length;
        /*
         * Advance to the next pid in the array. If this goes off the
         * end, we're done
         */
        p++;
        if (p >= end) {
                return NULL;
        } else {
                *pos = *p;
                return p;
        }
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
        seq_printf(s, "%d\n", *(int *)v);

        return 0;
}

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
                                  char *buf, size_t nbytes, loff_t off)
{
        return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
                                          char *buf, size_t nbytes, loff_t off)
{
        struct cgroup *cgrp;

        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
        strlcpy(cgrp->root->release_agent_path, strstrip(buf),
                sizeof(cgrp->root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        cgroup_kn_unlock(of->kn);
        return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;

        spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
        spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
        return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "0\n");
        return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
                                          struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
                clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
                                      struct cftype *cft)
{
        return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
                                       struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
                clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
        {
                .name = "cgroup.procs",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup_procs_write,
        },
        {
                .name = "cgroup.clone_children",
                .read_u64 = cgroup_clone_children_read,
                .write_u64 = cgroup_clone_children_write,
        },
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_sane_behavior_show,
        },
        {
                .name = "tasks",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write = cgroup_tasks_write,
        },
        {
                .name = "notify_on_release",
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = "release_agent",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
                .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
        struct cgroup_subsys *ss;
        int i;

        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
        /*
         * ideally we don't want subsystems moving around while we do this.
         * cgroup_mutex is also necessary to guarantee an atomic snapshot of
         * subsys/hierarchy state.
         */
        mutex_lock(&cgroup_mutex);

        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->legacy_name, ss->root->hierarchy_id,
                           atomic_read(&ss->root->nr_cgrps),
                           cgroup_ssid_enabled(i));

        mutex_unlock(&cgroup_mutex);
        return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
        .open = cgroupstats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup *cgrp;
        struct css_task_iter it;
        struct task_struct *tsk;

        /* the kernfs_node should belong to cgroupfs and be a directory */
        if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
            kernfs_type(kn) != KERNFS_DIR)
                return -EINVAL;

        mutex_lock(&cgroup_mutex);

        /*
         * We aren't being called from kernfs and there's no guarantee on
         * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
        rcu_read_lock();
        cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
        if (!cgrp || cgroup_is_dead(cgrp)) {
                rcu_read_unlock();
                mutex_unlock(&cgroup_mutex);
                return -ENOENT;
        }
        rcu_read_unlock();

        css_task_iter_start(&cgrp->self, &it);
        while ((tsk = css_task_iter_next(&it))) {
                switch (tsk->state) {
                case TASK_RUNNING:
                        stats->nr_running++;
                        break;
                case TASK_INTERRUPTIBLE:
                        stats->nr_sleeping++;
                        break;
                case TASK_UNINTERRUPTIBLE:
                        stats->nr_uninterruptible++;
                        break;
                case TASK_STOPPED:
                        stats->nr_stopped++;
                        break;
                default:
                        if (delayacct_is_task_waiting_on_io(tsk))
                                stats->nr_io_wait++;
                        break;
                }
        }
        css_task_iter_end(&it);

        mutex_unlock(&cgroup_mutex);
        return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
        if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
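/*
 * Illustrative example (not from the original source): with the root's
 * release_agent path set to "/sbin/my_agent" (a hypothetical helper) and
 * the cgroup "/jobs/batch1" becoming empty, the code below effectively
 * invokes:
 *
 *      /sbin/my_agent /jobs/batch1
 *
 * with HOME=/ and a minimal PATH as the only environment.
 */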
void cgroup1_release_agent(struct work_struct *work)
{
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
        char *pathbuf = NULL, *agentbuf = NULL;
        char *argv[3], *envp[3];
        int ret;

        mutex_lock(&cgroup_mutex);

        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
        if (!pathbuf || !agentbuf)
                goto out;

        spin_lock_irq(&css_set_lock);
        ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        spin_unlock_irq(&css_set_lock);
        if (ret < 0 || ret >= PATH_MAX)
                goto out;

        argv[0] = agentbuf;
        argv[1] = pathbuf;
        argv[2] = NULL;

        /* minimal command environment */
        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        mutex_unlock(&cgroup_mutex);
        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
        goto out_free;
out:
        mutex_unlock(&cgroup_mutex);
out_free:
        kfree(agentbuf);
        kfree(pathbuf);
}

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
                          const char *new_name_str)
{
        struct cgroup *cgrp = kn->priv;
        int ret;

        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
                return -EIO;

        /*
         * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref.  kernfs_rename() doesn't require active_ref
         * protection.  Break them before grabbing cgroup_mutex.
         */
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);

        mutex_lock(&cgroup_mutex);

        ret = kernfs_rename(kn, new_parent, new_name_str);
        if (!ret)
                trace_cgroup_rename(cgrp);

        mutex_unlock(&cgroup_mutex);

        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
        return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_subsys *ss;
        int ssid;

        for_each_subsys(ss, ssid)
                if (root->subsys_mask & (1 << ssid))
                        seq_show_option(seq, ss->legacy_name, NULL);
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
                seq_puts(seq, ",xattr");

        spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
                seq_show_option(seq, "release_agent",
                                root->release_agent_path);
        spin_unlock(&release_agent_path_lock);

        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_show_option(seq, "name", root->name);
        return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
        char *token, *o = data;
        bool all_ss = false, one_ss = false;
        u16 mask = U16_MAX;
        struct cgroup_subsys *ss;
        int nr_opts = 0;
        int i;

#ifdef CONFIG_CPUSETS
        mask = ~((u16)1 << cpuset_cgrp_id);
#endif

        memset(opts, 0, sizeof(*opts));

        while ((token = strsep(&o, ",")) != NULL) {
                nr_opts++;

                if (!*token)
                        return -EINVAL;
                if (!strcmp(token, "none")) {
                        /* Explicitly have no subsystems */
                        opts->none = true;
                        continue;
                }
                if (!strcmp(token, "all")) {
                        /* Mutually exclusive option 'all' + subsystem name */
                        if (one_ss)
                                return -EINVAL;
                        all_ss = true;
                        continue;
                }
                if (!strcmp(token, "noprefix")) {
                        opts->flags |= CGRP_ROOT_NOPREFIX;
                        continue;
                }
                if (!strcmp(token, "clone_children")) {
                        opts->cpuset_clone_children = true;
                        continue;
                }
                if (!strcmp(token, "xattr")) {
                        opts->flags |= CGRP_ROOT_XATTR;
                        continue;
                }
                if (!strncmp(token, "release_agent=", 14)) {
                        /* Specifying two release agents is forbidden */
                        if (opts->release_agent)
                                return -EINVAL;
                        opts->release_agent =
                                kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
                        if (!opts->release_agent)
                                return -ENOMEM;
                        continue;
                }
                if (!strncmp(token, "name=", 5)) {
                        const char *name = token + 5;
                        /* Can't specify an empty name */
                        if (!strlen(name))
                                return -EINVAL;
                        /* Must match [\w.-]+ */
                        for (i = 0; i < strlen(name); i++) {
                                char c = name[i];
                                if (isalnum(c))
                                        continue;
                                if ((c == '.') || (c == '-') || (c == '_'))
                                        continue;
                                return -EINVAL;
                        }
                        /* Specifying two names is forbidden */
                        if (opts->name)
                                return -EINVAL;
                        opts->name = kstrndup(name,
                                              MAX_CGROUP_ROOT_NAMELEN - 1,
                                              GFP_KERNEL);
                        if (!opts->name)
                                return -ENOMEM;

                        continue;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->legacy_name))
                                continue;
                        if (!cgroup_ssid_enabled(i))
                                continue;
                        if (cgroup1_ssid_disabled(i))
                                continue;

                        /* Mutually exclusive option 'all' + subsystem name */
                        if (all_ss)
                                return -EINVAL;
                        opts->subsys_mask |= (1 << i);
                        one_ss = true;

                        break;
                }
                if (i == CGROUP_SUBSYS_COUNT)
                        return -ENOENT;
        }

        /*
         * If the 'all' option was specified, select all subsystems;
         * otherwise, if none of 'none', 'name=' or a subsystem name was
         * specified, default to 'all'.
         */
        if (all_ss || (!one_ss && !opts->none && !opts->name))
                for_each_subsys(ss, i)
                        if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
                                opts->subsys_mask |= (1 << i);

        /*
         * We either have to specify by name or by subsystems. (So all
         * empty hierarchies must have a name).
         */
        if (!opts->subsys_mask && !opts->name)
                return -EINVAL;

        /*
         * Option noprefix was introduced just for backward compatibility
         * with the old cpuset, so we allow noprefix only if mounting just
         * the cpuset subsystem.
         */
        if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
                return -EINVAL;

        /* Can't specify "none" and some subsystems */
        if (opts->subsys_mask && opts->none)
                return -EINVAL;

        return 0;
}

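/*
 * Illustrative examples (not from the original source): mounting with
 * "-o cpu,cpuacct,name=batch" leaves opts->subsys_mask with the cpu and
 * cpuacct bits set and opts->name == "batch", while "-o none,name=mygrp"
 * yields an empty subsys_mask with opts->none set, as used for purely
 * named hierarchies.
 */
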
static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
        int ret = 0;
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_sb_opts opts;
        u16 added_mask, removed_mask;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* See what subsystems are wanted */
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;

        if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
                pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
                        task_tgid_nr(current), current->comm);

        added_mask = opts.subsys_mask & ~root->subsys_mask;
        removed_mask = root->subsys_mask & ~opts.subsys_mask;

        /* Don't allow flags or name to change at remount */
        if ((opts.flags ^ root->flags) ||
            (opts.name && strcmp(opts.name, root->name))) {
                pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
                       opts.flags, opts.name ?: "", root->flags, root->name);
                ret = -EINVAL;
                goto out_unlock;
        }

        /* remounting is not allowed for populated hierarchies */
        if (!list_empty(&root->cgrp.self.children)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        ret = rebind_subsystems(root, added_mask);
        if (ret)
                goto out_unlock;

        WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

        if (opts.release_agent) {
                spin_lock(&release_agent_path_lock);
                strcpy(root->release_agent_path, opts.release_agent);
                spin_unlock(&release_agent_path_lock);
        }

        trace_cgroup_remount(root);

 out_unlock:
        kfree(opts.release_agent);
        kfree(opts.name);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
        .rename                 = cgroup1_rename,
        .show_options           = cgroup1_show_options,
        .remount_fs             = cgroup1_remount,
        .mkdir                  = cgroup_mkdir,
        .rmdir                  = cgroup_rmdir,
        .show_path              = cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
                             void *data, unsigned long magic,
                             struct cgroup_namespace *ns)
{
        struct super_block *pinned_sb = NULL;
        struct cgroup_sb_opts opts;
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        struct dentry *dentry;
        int i, ret;
        bool new_root = false;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* First find the desired set of subsystems */
        ret = parse_cgroupfs_options(data, &opts);
        if (ret)
                goto out_unlock;

        /*
         * Destruction of cgroup root is asynchronous, so subsystems may
         * still be dying after the previous unmount.  Let's drain the
         * dying subsystems.  We just need to ensure that the ones
         * unmounted previously finish dying and don't care about new ones
         * starting.  Testing ref liveness is good enough.
         */
        for_each_subsys(ss, i) {
                if (!(opts.subsys_mask & (1 << i)) ||
                    ss->root == &cgrp_dfl_root)
                        continue;

                if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
                }
                cgroup_put(&ss->root->cgrp);
        }

        for_each_root(root) {
                bool name_match = false;

                if (root == &cgrp_dfl_root)
                        continue;

                /*
                 * If we asked for a name then it must match.  Also, if
                 * name matches but subsys_mask doesn't, we should fail.
                 * Remember whether name matched.
                 */
                if (opts.name) {
                        if (strcmp(opts.name, root->name))
                                continue;
                        name_match = true;
                }

                /*
                 * If we asked for subsystems (or explicitly for no
                 * subsystems) then they must match.
                 */
                if ((opts.subsys_mask || opts.none) &&
                    (opts.subsys_mask != root->subsys_mask)) {
                        if (!name_match)
                                continue;
                        ret = -EBUSY;
                        goto out_unlock;
                }

                if (root->flags ^ opts.flags)
                        pr_warn("new mount options do not match the existing superblock, will be ignored\n");

                /*
                 * We want to reuse @root whose lifetime is governed by its
                 * ->cgrp.  Let's check whether @root is alive and keep it
                 * that way.  As cgroup_kill_sb() can happen anytime, we
                 * want to block it by pinning the sb so that @root doesn't
                 * get killed before mount is complete.
                 *
                 * With the sb pinned, tryget_live can reliably indicate
                 * whether @root can be reused.  If it's being killed,
                 * drain it.  We can use wait_queue for the wait but this
                 * path is super cold.  Let's just sleep a bit and retry.
                 */
                pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
                if (IS_ERR(pinned_sb) ||
                    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
                        if (!IS_ERR_OR_NULL(pinned_sb))
                                deactivate_super(pinned_sb);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
                }

                ret = 0;
                goto out_unlock;
        }

        /*
         * No such thing, create a new one.  name= matching without subsys
         * specification is allowed for already existing hierarchies but we
         * can't create new one without subsys specification.
         */
        if (!opts.subsys_mask && !opts.none) {
                ret = -EINVAL;
                goto out_unlock;
        }

        /* Hierarchies may only be created in the initial cgroup namespace. */
        if (ns != &init_cgroup_ns) {
                ret = -EPERM;
                goto out_unlock;
        }

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root) {
                ret = -ENOMEM;
                goto out_unlock;
        }
        new_root = true;

        init_cgroup_root(root, &opts);

        ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
        if (ret)
                cgroup_free_root(root);

out_unlock:
        mutex_unlock(&cgroup_mutex);
out_free:
        kfree(opts.release_agent);
        kfree(opts.name);

        if (ret)
                return ERR_PTR(ret);

        dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
                                 CGROUP_SUPER_MAGIC, ns);

        /*
         * There's a race window after we release cgroup_mutex and before
         * allocating a superblock. Make sure a concurrent process won't
         * be able to re-use the root during this window by delaying the
         * initialization of root refcnt.
         */
        if (new_root) {
                mutex_lock(&cgroup_mutex);
                percpu_ref_reinit(&root->cgrp.self.refcnt);
                mutex_unlock(&cgroup_mutex);
        }

        /*
         * If @pinned_sb, we're reusing an existing root and holding an
         * extra ref on its sb.  Mount is complete.  Put the extra ref.
         */
        if (pinned_sb)
                deactivate_super(pinned_sb);

        return dentry;
}

static int __init cgroup1_wq_init(void)
{
        /*
         * Used to destroy pidlists and kept separate so that it can serve
         * as the flush domain.  Cap @max_active to 1 too.
         */
        cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
                                                    0, 1);
        BUG_ON(!cgroup_pidlist_destroy_wq);
        return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                if (!strcmp(token, "all")) {
                        cgroup_no_v1_mask = U16_MAX;
                        break;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;

                        cgroup_no_v1_mask |= 1 << i;
                }
        }
        return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
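
/*
 * Illustrative example (not from the original source): booting with
 * "cgroup_no_v1=memory,blkio" sets those controllers' bits in
 * cgroup_no_v1_mask, so cgroup1_ssid_disabled() reports them as blocked
 * and v1 mounts cannot bind them; "cgroup_no_v1=all" blocks every
 * controller from v1 hierarchies.
 */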