linux/kernel/cgroup/cgroup-v1.c
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger for the following amount of time before being destroyed.
 * The goal is to avoid frequent destruction in the middle of consecutive
 * read calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
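
/*
 * A minimal usage sketch for the export above, assuming a hypothetical
 * caller that wants a kernel worker co-located with a user task (the
 * function and variable names here are illustrative, not part of this
 * file):
 *
 *	static int colocate_worker(struct task_struct *worker,
 *				   struct task_struct *leader)
 *	{
 *		return cgroup_attach_task_all(leader, worker);
 *	}
 *
 * On success @worker sits in the same cgroup as @leader on every mounted
 * v1 hierarchy; the default (v2) hierarchy is skipped.
 */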

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new
 * child ends up either visible in the source cgroup after the parent's
 * migration is complete or placed in the target cgroup.  No task can
 * slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
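
/*
 * Note: the best-known in-tree user of cgroup_transfer_tasks() is cpuset,
 * which uses it to move tasks out of a cpuset whose CPUs or memory nodes
 * have all gone offline.
 */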

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}
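
/*
 * A minimal sketch of the kernel-wide replacement the TODO above asks
 * for, assuming the kvmalloc helpers (merged after this comment was
 * written) are available; this is what the pair would collapse to, not
 * code used here:
 *
 *	pid_t *list = kvmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
 *	...
 *	kvfree(list);
 *
 * kvmalloc_array() falls back from kmalloc to vmalloc automatically and
 * also checks count * sizeof(pid_t) for overflow, which the open-coded
 * multiplications above do not.
 */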

/*
 * Used to destroy all pidlists still lingering on their destroy timer.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed, sorted list, strip out all
 * duplicate entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
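
/*
 * Example: for a sorted input {1, 4, 4, 7, 7, 7} with length == 6,
 * pidlist_uniq() compacts the array in place so that it begins
 * {1, 4, 7} and returns 3; the elements past the returned length are
 * stale and simply ignored by the callers.
 */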

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;
	spin_unlock_irq(&css_set_lock);
	return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If the cgroup gains more users after we read the count, we won't
	 * have enough space - tough.  To the caller, this race is
	 * indistinguishable from the additional users simply not showing
	 * up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the pidlist's ->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
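
/*
 * Example of the resume semantics above: with l->list = {3, 5, 9}, a
 * reader that last saw pid 5 comes back with *pos == 9 (set by
 * cgroup_pidlist_next() before the buffer filled), and the binary
 * search finds the exact match at index 2.  If pid 9 has exited in the
 * meantime and the rebuilt pidlist is {3, 5}, the search runs off the
 * end and the read simply terminates at EOF rather than replaying or
 * skipping entries.
 */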

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
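
/*
 * From userspace the difference between the two wrappers below is only
 * which file is written: writing "1234" to cgroup.procs moves the whole
 * thread group that pid 1234 belongs to, while writing the same string
 * to tasks moves just that one thread.
 */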

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* the kernfs_node should belong to cgroupfs and be a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again by then, and this cgroup will be reprieved from its death
 * sentence, to continue to serve a useful existence.  Next time it's
 * released, we will get notified again, if it still has
 * 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
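
/*
 * Example invocation: with release_agent set to the (hypothetical) path
 * "/sbin/cgroup-release" and the cgroup "jobs/batch1" emptying out, the
 * helper is spawned roughly as
 *
 *	/sbin/cgroup-release /jobs/batch1
 *
 * i.e. argv[1] is the cgroup path relative to the hierarchy root, not an
 * absolute filesystem path, with HOME and PATH as the only environment.
 */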

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "cpuset_v2_mode")) {
			opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup1_ssid_disabled(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems.
	 * Otherwise, if none of 'none', 'name=' or a subsystem name was
	 * specified, also default to 'all'.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
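
/*
 * Example option strings as seen by the parser above:
 *
 *	"cpu,cpuacct,name=build"  -> cpu and cpuacct in subsys_mask,
 *				     opts->name == "build"
 *	"none,name=systemd"       -> empty subsys_mask with opts->none set,
 *				     the named, controller-less form
 *	"all"                     -> every enabled v1 controller
 *
 * Mixing "all" with an explicit subsystem name, or "none" with any
 * subsystem, fails with -EINVAL as checked above.
 */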

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.remount_fs		= cgroup1_remount,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_sb_opts opts;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct dentry *dentry;
	int i, ret;
	bool new_root = false;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We could use a wait_queue for the wait but
		 * this path is super cold.  Let's just sleep a bit and
		 * retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	new_root = true;

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
				 CGROUP_SUPER_MAGIC, ns);

	/*
	 * There's a race window after we release cgroup_mutex and before
	 * allocating a superblock. Make sure a concurrent process won't
	 * be able to re-use the root during this window by delaying the
	 * initialization of root refcnt.
	 */
	if (new_root) {
		mutex_lock(&cgroup_mutex);
		percpu_ref_reinit(&root->cgrp.self.refcnt);
		mutex_unlock(&cgroup_mutex);
	}

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb)
		deactivate_super(pinned_sb);

	return dentry;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and kept separate so that it can serve
	 * as the flush domain.  @max_active is also capped at 1.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
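
/*
 * Example: booting with "cgroup_no_v1=memory,blkio" prevents those two
 * controllers from being mounted on any v1 hierarchy (they remain usable
 * on v2), while "cgroup_no_v1=all" blocks every controller from v1.
 */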