linux/kernel/cgroup/cgroup-v1.c
// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY    HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_root->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
        return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
        struct cgroup_root *root;
        int retval = 0;

        mutex_lock(&cgroup_mutex);
        percpu_down_write(&cgroup_threadgroup_rwsem);
        for_each_root(root) {
                struct cgroup *from_cgrp;

                if (root == &cgrp_dfl_root)
                        continue;

                spin_lock_irq(&css_set_lock);
                from_cgrp = task_cgroup_from_root(from, root);
                spin_unlock_irq(&css_set_lock);

                retval = cgroup_attach_task(from_cgrp, tsk, false);
                if (retval)
                        break;
        }
        percpu_up_write(&cgroup_threadgroup_rwsem);
        mutex_unlock(&cgroup_mutex);

        return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

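/*
 * A minimal sketch of how this exported helper can be used from a
 * module: spawn a worker kthread and place it in the same v1 cgroups
 * as the requesting task (vhost attaches its worker threads this way).
 * worker_fn and data are hypothetical; error handling is elided.
 *
 *	struct task_struct *worker;
 *
 *	worker = kthread_create(worker_fn, data, "example-worker");
 *	if (!IS_ERR(worker)) {
 *		cgroup_attach_task_all(current, worker);
 *		wake_up_process(worker);
 *	}
 */
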
/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new
 * child ends up either visible in the source cgroup after the parent's
 * migration is complete or put into the target cgroup.  No task can
 * slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
        DEFINE_CGROUP_MGCTX(mgctx);
        struct cgrp_cset_link *link;
        struct css_task_iter it;
        struct task_struct *task;
        int ret;

        if (cgroup_on_dfl(to))
                return -EINVAL;

        ret = cgroup_migrate_vet_dst(to);
        if (ret)
                return ret;

        mutex_lock(&cgroup_mutex);

        percpu_down_write(&cgroup_threadgroup_rwsem);

        /* all tasks in @from are being moved, all csets are source */
        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &from->cset_links, cset_link)
                cgroup_migrate_add_src(link->cset, to, &mgctx);
        spin_unlock_irq(&css_set_lock);

        ret = cgroup_migrate_prepare_dst(&mgctx);
        if (ret)
                goto out_err;

        /*
         * Migrate tasks one-by-one until @from is empty.  This fails iff
         * ->can_attach() fails.
         */
        do {
                css_task_iter_start(&from->self, 0, &it);

                do {
                        task = css_task_iter_next(&it);
                } while (task && (task->flags & PF_EXITING));

                if (task)
                        get_task_struct(task);
                css_task_iter_end(&it);

                if (task) {
                        ret = cgroup_migrate(task, false, &mgctx);
                        if (!ret)
                                TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
                        put_task_struct(task);
                }
        } while (task && !ret);
out_err:
        cgroup_migrate_finish(&mgctx);
        percpu_up_write(&cgroup_threadgroup_rwsem);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
        CGROUP_FILE_PROCS,
        CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
        /*
         * used to find which pidlist is wanted. doesn't change as long as
         * this particular list stays in the list.
         */
        struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
        /* array of xids */
        pid_t *list;
        /* how many elements the above list has */
        int length;
        /* each of these stored in a list by its cgroup */
        struct list_head links;
        /* pointer to the cgroup we belong to, for list removal purposes */
        struct cgroup *owner;
        /* for delayed destruction */
        struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists still lingering on their destroy timers.
 * None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
        struct cgroup_pidlist *l, *tmp_l;

        mutex_lock(&cgrp->pidlist_mutex);
        list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
        mutex_unlock(&cgrp->pidlist_mutex);

        flush_workqueue(cgroup_pidlist_destroy_wq);
        BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
                                                destroy_dwork);
        struct cgroup_pidlist *tofree = NULL;

        mutex_lock(&l->owner->pidlist_mutex);

        /*
         * Destroy iff we didn't get queued again.  The state won't change
         * as destroy_dwork can only be queued while locked.
         */
        if (!delayed_work_pending(dwork)) {
                list_del(&l->links);
                kvfree(l->list);
                put_pid_ns(l->key.ns);
                tofree = l;
        }

        mutex_unlock(&l->owner->pidlist_mutex);
        kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted kmalloc()ed list, strip out all duplicate
 * entries.  Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
        int src, dest = 1;

        /*
         * we presume the 0th element is unique, so src starts at 1.  Trivial
         * edge cases first; no work needs to be done for either.
         */
        if (length == 0 || length == 1)
                return length;
        /* src and dest walk down the list; dest counts unique elements */
        for (src = 1; src < length; src++) {
                /* find next unique element */
                while (list[src] == list[src-1]) {
                        src++;
                        if (src == length)
                                goto after;
                }
                /* dest always points to where the next unique element goes */
                list[dest] = list[src];
                dest++;
        }
after:
        return dest;
}

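/*
 * Worked example (illustrative): for the sorted input
 * {1, 3, 3, 5, 5, 5, 8} with length == 7, the loop compacts the array
 * in place to {1, 3, 5, 8, ...} and returns 4.  Only adjacent
 * duplicates are detected, which is why callers must sort() first.
 */
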
/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
        return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
                                                  enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;
        /* don't need task_nsproxy() if we're looking at ourself */
        struct pid_namespace *ns = task_active_pid_ns(current);

        lockdep_assert_held(&cgrp->pidlist_mutex);

        list_for_each_entry(l, &cgrp->pidlists, links)
                if (l->key.type == type && l->key.ns == ns)
                        return l;
        return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Returns the pidlist, or NULL if
 * we're out of memory.  Must be called with cgrp->pidlist_mutex held.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
                                                enum cgroup_filetype type)
{
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        l = cgroup_pidlist_find(cgrp, type);
        if (l)
                return l;

        /* entry not found; create a new one */
        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
        if (!l)
                return l;

        INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
        l->key.type = type;
        /* don't need task_nsproxy() if we're looking at ourself */
        l->key.ns = get_pid_ns(task_active_pid_ns(current));
        l->owner = cgrp;
        list_add(&l->links, &cgrp->pidlists);
        return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
                              struct cgroup_pidlist **lp)
{
        pid_t *array;
        int length;
        int pid, n = 0; /* used for populating the array */
        struct css_task_iter it;
        struct task_struct *tsk;
        struct cgroup_pidlist *l;

        lockdep_assert_held(&cgrp->pidlist_mutex);

        /*
         * If cgroup gets more users after we read count, we won't have
         * enough space - tough.  This race is indistinguishable to the
         * caller from the case that the additional cgroup users didn't
         * show up until sometime later on.
         */
        length = cgroup_task_count(cgrp);
        array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
        if (!array)
                return -ENOMEM;
        /* now, populate the array */
        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                if (unlikely(n == length))
                        break;
                /* get tgid or pid for procs or tasks file respectively */
                if (type == CGROUP_FILE_PROCS)
                        pid = task_tgid_vnr(tsk);
                else
                        pid = task_pid_vnr(tsk);
                if (pid > 0) /* make sure to only use valid results */
                        array[n++] = pid;
        }
        css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
        if (type == CGROUP_FILE_PROCS)
                length = pidlist_uniq(array, length);

        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
                kvfree(array);
                return -ENOMEM;
        }

        /* store array, freeing old if necessary */
        kvfree(l->list);
        l->list = array;
        l->length = length;
        *lp = l;
        return 0;
}

/*
 * seq_file methods for the tasks/procs files.  The seq_file position is
 * the next pid to display; the seq_file iterator is a pointer to the pid
 * in the pidlist's ->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
        /*
         * Initially we receive a position value that corresponds to
         * one more than the last pid shown (or 0 on the first call or
         * after a seek to the start).  Use a binary search to find the
         * next pid to display, if any.
         */
        struct kernfs_open_file *of = s->private;
        struct cgroup *cgrp = seq_css(s)->cgroup;
        struct cgroup_pidlist *l;
        enum cgroup_filetype type = seq_cft(s)->private;
        int index = 0, pid = *pos;
        int *iter, ret;

        mutex_lock(&cgrp->pidlist_mutex);

        /*
         * !NULL @of->priv indicates that this isn't the first start()
         * after open.  If the matching pidlist is around, we can use that.
         * Look for it.  Note that @of->priv can't be used directly.  It
         * could already have been destroyed.
         */
        if (of->priv)
                of->priv = cgroup_pidlist_find(cgrp, type);

        /*
         * Either this is the first start() after open or the matching
         * pidlist has been destroyed in between.  Create a new one.
         */
        if (!of->priv) {
                ret = pidlist_array_load(cgrp, type,
                                         (struct cgroup_pidlist **)&of->priv);
                if (ret)
                        return ERR_PTR(ret);
        }
        l = of->priv;

        if (pid) {
                int end = l->length;

                while (index < end) {
                        int mid = (index + end) / 2;
                        if (l->list[mid] == pid) {
                                index = mid;
                                break;
                        } else if (l->list[mid] <= pid)
                                index = mid + 1;
                        else
                                end = mid;
                }
        }
        /* If we're off the end of the array, we're done */
        if (index >= l->length)
                return NULL;
        /* Update the abstract position to be the actual pid that we found */
        iter = l->list + index;
        *pos = *iter;
        return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;

        if (l)
                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
                                 CGROUP_PIDLIST_DESTROY_DELAY);
        mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct kernfs_open_file *of = s->private;
        struct cgroup_pidlist *l = of->priv;
        pid_t *p = v;
        pid_t *end = l->list + l->length;
        /*
         * Advance to the next pid in the array. If this goes off the
         * end, we're done
         */
        p++;
        if (p >= end) {
                (*pos)++;
                return NULL;
        } else {
                *pos = *p;
                return p;
        }
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
        seq_printf(s, "%d\n", *(int *)v);

        return 0;
}
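
/*
 * For orientation (a sketch, not the exact seq_file internals): one
 * read(2) of "tasks"/"cgroup.procs" drives the iterator roughly as
 *
 *	v = cgroup_pidlist_start(s, &pos);	// takes pidlist_mutex
 *	while (v && there is buffer space) {
 *		cgroup_pidlist_show(s, v);	// emits one pid
 *		v = cgroup_pidlist_next(s, v, &pos);
 *	}
 *	cgroup_pidlist_stop(s, v);		// drops the mutex, arms
 *						// the destroy timer
 */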

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
                                     char *buf, size_t nbytes, loff_t off,
                                     bool threadgroup)
{
        struct cgroup *cgrp;
        struct task_struct *task;
        const struct cred *cred, *tcred;
        ssize_t ret;
        bool locked;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;

        task = cgroup_procs_write_start(buf, threadgroup, &locked);
        ret = PTR_ERR_OR_ZERO(task);
        if (ret)
                goto out_unlock;

        /*
         * Even if we're attaching all tasks in the thread group, we only
         * need to check permissions on one of them.
         */
        cred = current_cred();
        tcred = get_task_cred(task);
        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
            !uid_eq(cred->euid, tcred->uid) &&
            !uid_eq(cred->euid, tcred->suid))
                ret = -EACCES;
        put_cred(tcred);
        if (ret)
                goto out_finish;

        ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
        cgroup_procs_write_finish(task, locked);
out_unlock:
        cgroup_kn_unlock(of->kn);

        return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
                                   char *buf, size_t nbytes, loff_t off)
{
        return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
                                          char *buf, size_t nbytes, loff_t off)
{
        struct cgroup *cgrp;

        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
        strlcpy(cgrp->root->release_agent_path, strstrip(buf),
                sizeof(cgrp->root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        cgroup_kn_unlock(of->kn);
        return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
        struct cgroup *cgrp = seq_css(seq)->cgroup;

        spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
        spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
        return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
        seq_puts(seq, "0\n");
        return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
                                          struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
                clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
                                      struct cftype *cft)
{
        return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
                                       struct cftype *cft, u64 val)
{
        if (val)
                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
                clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
        {
                .name = "cgroup.procs",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
                .write = cgroup1_procs_write,
        },
        {
                .name = "cgroup.clone_children",
                .read_u64 = cgroup_clone_children_read,
                .write_u64 = cgroup_clone_children_write,
        },
        {
                .name = "cgroup.sane_behavior",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_sane_behavior_show,
        },
        {
                .name = "tasks",
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
                .write = cgroup1_tasks_write,
        },
        {
                .name = "notify_on_release",
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = "release_agent",
                .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
                .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
        struct cgroup_subsys *ss;
        int i;

        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
        /*
         * ideally we don't want subsystems moving around while we do this.
         * cgroup_mutex is also necessary to guarantee an atomic snapshot of
         * subsys/hierarchy state.
         */
        mutex_lock(&cgroup_mutex);

        for_each_subsys(ss, i)
                seq_printf(m, "%s\t%d\t%d\t%d\n",
                           ss->legacy_name, ss->root->hierarchy_id,
                           atomic_read(&ss->root->nr_cgrps),
                           cgroup_ssid_enabled(i));

        mutex_unlock(&cgroup_mutex);
        return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup *cgrp;
        struct css_task_iter it;
        struct task_struct *tsk;

        /* it should be a kernfs_node belonging to cgroupfs and a directory */
        if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
            kernfs_type(kn) != KERNFS_DIR)
                return -EINVAL;

        mutex_lock(&cgroup_mutex);

        /*
         * We aren't being called from kernfs and there's no guarantee on
         * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
        rcu_read_lock();
        cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
        if (!cgrp || cgroup_is_dead(cgrp)) {
                rcu_read_unlock();
                mutex_unlock(&cgroup_mutex);
                return -ENOENT;
        }
        rcu_read_unlock();

        css_task_iter_start(&cgrp->self, 0, &it);
        while ((tsk = css_task_iter_next(&it))) {
                switch (tsk->state) {
                case TASK_RUNNING:
                        stats->nr_running++;
                        break;
                case TASK_INTERRUPTIBLE:
                        stats->nr_sleeping++;
                        break;
                case TASK_UNINTERRUPTIBLE:
                        stats->nr_uninterruptible++;
                        break;
                case TASK_STOPPED:
                        stats->nr_stopped++;
                        break;
                default:
                        if (delayacct_is_task_waiting_on_io(tsk))
                                stats->nr_io_wait++;
                        break;
                }
        }
        css_task_iter_end(&it);

        mutex_unlock(&cgroup_mutex);
        return 0;
}
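
/*
 * Userspace reaches this via the taskstats netlink interface
 * (CGROUPSTATS_CMD_GET) with an open fd of a cgroupfs directory;
 * tools/accounting/getdelays.c (the -C option) is the in-tree example
 * consumer.
 */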

void cgroup1_check_for_release(struct cgroup *cgrp)
{
        if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
        char *pathbuf, *agentbuf;
        char *argv[3], *envp[3];
        int ret;

        /* snoop agent path and exit early if empty */
        if (!cgrp->root->release_agent_path[0])
                return;

        /* prepare argument buffers */
        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathbuf || !agentbuf)
                goto out_free;

        spin_lock(&release_agent_path_lock);
        strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
        spin_unlock(&release_agent_path_lock);
        if (!agentbuf[0])
                goto out_free;

        ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        if (ret < 0 || ret >= PATH_MAX)
                goto out_free;

        argv[0] = agentbuf;
        argv[1] = pathbuf;
        argv[2] = NULL;

        /* minimal command environment */
        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
        kfree(agentbuf);
        kfree(pathbuf);
}
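
/*
 * A minimal sketch of a userspace release agent, assuming a v1
 * hierarchy mounted at /sys/fs/cgroup/memory (the mount point is
 * hypothetical).  argv[1] is the released cgroup's path relative to
 * the hierarchy root, as built by cgroup_path_ns() above:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[4096];
 *
 *		if (argc < 2)
 *			return 1;
 *		snprintf(path, sizeof(path), "/sys/fs/cgroup/memory%s",
 *			 argv[1]);
 *		return rmdir(path) ? 1 : 0;
 *	}
 */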

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
                          const char *new_name_str)
{
        struct cgroup *cgrp = kn->priv;
        int ret;

        if (kernfs_type(kn) != KERNFS_DIR)
                return -ENOTDIR;
        if (kn->parent != new_parent)
                return -EIO;

        /*
         * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref.  kernfs_rename() doesn't require active_ref
         * protection.  Break them before grabbing cgroup_mutex.
         */
        kernfs_break_active_protection(new_parent);
        kernfs_break_active_protection(kn);

        mutex_lock(&cgroup_mutex);

        ret = kernfs_rename(kn, new_parent, new_name_str);
        if (!ret)
                TRACE_CGROUP_PATH(rename, cgrp);

        mutex_unlock(&cgroup_mutex);

        kernfs_unbreak_active_protection(kn);
        kernfs_unbreak_active_protection(new_parent);
        return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        struct cgroup_subsys *ss;
        int ssid;

        for_each_subsys(ss, ssid)
                if (root->subsys_mask & (1 << ssid))
                        seq_show_option(seq, ss->legacy_name, NULL);
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
                seq_puts(seq, ",xattr");
        if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
                seq_puts(seq, ",cpuset_v2_mode");

        spin_lock(&release_agent_path_lock);
        if (strlen(root->release_agent_path))
                seq_show_option(seq, "release_agent",
                                root->release_agent_path);
        spin_unlock(&release_agent_path_lock);

        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
                seq_puts(seq, ",clone_children");
        if (strlen(root->name))
                seq_show_option(seq, "name", root->name);
        return 0;
}

enum cgroup1_param {
        Opt_all,
        Opt_clone_children,
        Opt_cpuset_v2_mode,
        Opt_name,
        Opt_none,
        Opt_noprefix,
        Opt_release_agent,
        Opt_xattr,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
        fsparam_flag  ("all",           Opt_all),
        fsparam_flag  ("clone_children", Opt_clone_children),
        fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
        fsparam_string("name",          Opt_name),
        fsparam_flag  ("none",          Opt_none),
        fsparam_flag  ("noprefix",      Opt_noprefix),
        fsparam_string("release_agent", Opt_release_agent),
        fsparam_flag  ("xattr",         Opt_xattr),
        {}
};
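
/*
 * For illustration, a few well-formed v1 mount invocations these
 * parameters accept (paths and hierarchy names are only examples):
 *
 *	mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu,cpuacct
 *	mount -t cgroup -o none,name=systemd none /sys/fs/cgroup/systemd
 *	mount -t cgroup -o cpuset,noprefix none /sys/fs/cgroup/cpuset
 */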

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct cgroup_subsys *ss;
        struct fs_parse_result result;
        int opt, i;

        opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
        if (opt == -ENOPARAM) {
                if (strcmp(param->key, "source") == 0) {
                        if (fc->source)
                                return invalf(fc, "Multiple sources not supported");
                        fc->source = param->string;
                        param->string = NULL;
                        return 0;
                }
                for_each_subsys(ss, i) {
                        if (strcmp(param->key, ss->legacy_name))
                                continue;
                        if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
                                return invalfc(fc, "Disabled controller '%s'",
                                               param->key);
                        ctx->subsys_mask |= (1 << i);
                        return 0;
                }
                return invalfc(fc, "Unknown subsys name '%s'", param->key);
        }
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_none:
                /* Explicitly have no subsystems */
                ctx->none = true;
                break;
        case Opt_all:
                ctx->all_ss = true;
                break;
        case Opt_noprefix:
                ctx->flags |= CGRP_ROOT_NOPREFIX;
                break;
        case Opt_clone_children:
                ctx->cpuset_clone_children = true;
                break;
        case Opt_cpuset_v2_mode:
                ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
                break;
        case Opt_xattr:
                ctx->flags |= CGRP_ROOT_XATTR;
                break;
        case Opt_release_agent:
                /* Specifying two release agents is forbidden */
                if (ctx->release_agent)
                        return invalfc(fc, "release_agent respecified");
                ctx->release_agent = param->string;
                param->string = NULL;
                break;
        case Opt_name:
                /* blocked by boot param? */
                if (cgroup_no_v1_named)
                        return -ENOENT;
                /* Can't specify an empty name */
                if (!param->size)
                        return invalfc(fc, "Empty name");
                if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
                        return invalfc(fc, "Name too long");
                /* Must match [\w.-]+ */
                for (i = 0; i < param->size; i++) {
                        char c = param->string[i];
                        if (isalnum(c))
                                continue;
                        if ((c == '.') || (c == '-') || (c == '_'))
                                continue;
                        return invalfc(fc, "Invalid name");
                }
                /* Specifying two names is forbidden */
                if (ctx->name)
                        return invalfc(fc, "name respecified");
                ctx->name = param->string;
                param->string = NULL;
                break;
        }
        return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        u16 mask = U16_MAX;
        u16 enabled = 0;
        struct cgroup_subsys *ss;
        int i;

#ifdef CONFIG_CPUSETS
        mask = ~((u16)1 << cpuset_cgrp_id);
#endif
        for_each_subsys(ss, i)
                if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
                        enabled |= 1 << i;

        ctx->subsys_mask &= enabled;

        /*
         * In the absence of 'none', 'name=' or subsystem name options,
         * let's default to 'all'.
         */
        if (!ctx->subsys_mask && !ctx->none && !ctx->name)
                ctx->all_ss = true;

        if (ctx->all_ss) {
                /* Mutually exclusive option 'all' + subsystem name */
                if (ctx->subsys_mask)
                        return invalfc(fc, "subsys name conflicts with all");
                /* 'all' => select all the subsystems */
                ctx->subsys_mask = enabled;
        }

        /*
         * We either have to specify by name or by subsystems. (So all
         * empty hierarchies must have a name).
         */
        if (!ctx->subsys_mask && !ctx->name)
                return invalfc(fc, "Need name or subsystem set");

        /*
         * Option noprefix was introduced just for backward compatibility
         * with the old cpuset, so we allow noprefix only if mounting just
         * the cpuset subsystem.
         */
        if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
                return invalfc(fc, "noprefix used incorrectly");

        /* Can't specify "none" and some subsystems */
        if (ctx->subsys_mask && ctx->none)
                return invalfc(fc, "none used incorrectly");

        return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
        int ret = 0;
        u16 added_mask, removed_mask;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        /* See what subsystems are wanted */
        ret = check_cgroupfs_options(fc);
        if (ret)
                goto out_unlock;

        if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
                pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
                        task_tgid_nr(current), current->comm);

        added_mask = ctx->subsys_mask & ~root->subsys_mask;
        removed_mask = root->subsys_mask & ~ctx->subsys_mask;

        /* Don't allow flags or name to change at remount */
        if ((ctx->flags ^ root->flags) ||
            (ctx->name && strcmp(ctx->name, root->name))) {
                errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
                       ctx->flags, ctx->name ?: "", root->flags, root->name);
                ret = -EINVAL;
                goto out_unlock;
        }

        /* remounting is not allowed for populated hierarchies */
        if (!list_empty(&root->cgrp.self.children)) {
                ret = -EBUSY;
                goto out_unlock;
        }

        ret = rebind_subsystems(root, added_mask);
        if (ret)
                goto out_unlock;

        WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

        if (ctx->release_agent) {
                spin_lock(&release_agent_path_lock);
                strcpy(root->release_agent_path, ctx->release_agent);
                spin_unlock(&release_agent_path_lock);
        }

        trace_cgroup_remount(root);

 out_unlock:
        mutex_unlock(&cgroup_mutex);
        return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
        .rename                 = cgroup1_rename,
        .show_options           = cgroup1_show_options,
        .mkdir                  = cgroup_mkdir,
        .rmdir                  = cgroup_rmdir,
        .show_path              = cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create a cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on error,
 * and a positive value when the candidate root is busy dying.  On
 * success it stashes a reference to the cgroup_root into the given
 * cgroup_fs_context; that reference does *NOT* count towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        int i, ret;

        /* First find the desired set of subsystems */
        ret = check_cgroupfs_options(fc);
        if (ret)
                return ret;

        /*
         * Destruction of cgroup root is asynchronous, so subsystems may
         * still be dying after the previous unmount.  Let's drain the
         * dying subsystems.  We just need to ensure that the ones
         * unmounted previously finish dying and don't care about new ones
         * starting.  Testing ref liveness is good enough.
         */
        for_each_subsys(ss, i) {
                if (!(ctx->subsys_mask & (1 << i)) ||
                    ss->root == &cgrp_dfl_root)
                        continue;

                if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
                        return 1;       /* restart */
                cgroup_put(&ss->root->cgrp);
        }

        for_each_root(root) {
                bool name_match = false;

                if (root == &cgrp_dfl_root)
                        continue;

                /*
                 * If we asked for a name then it must match.  Also, if
                 * name matches but subsys_mask doesn't, we should fail.
                 * Remember whether name matched.
                 */
                if (ctx->name) {
                        if (strcmp(ctx->name, root->name))
                                continue;
                        name_match = true;
                }

                /*
                 * If we asked for subsystems (or explicitly for no
                 * subsystems) then they must match.
                 */
                if ((ctx->subsys_mask || ctx->none) &&
                    (ctx->subsys_mask != root->subsys_mask)) {
                        if (!name_match)
                                continue;
                        return -EBUSY;
                }

                if (root->flags ^ ctx->flags)
                        pr_warn("new mount options do not match the existing superblock, will be ignored\n");

                ctx->root = root;
                return 0;
        }

        /*
         * No such thing, create a new one.  name= matching without subsys
         * specification is allowed for already existing hierarchies but we
         * can't create a new one without subsys specification.
         */
        if (!ctx->subsys_mask && !ctx->none)
                return invalfc(fc, "No subsys list or none specified");

        /* Hierarchies may only be created in the initial cgroup namespace. */
        if (ctx->ns != &init_cgroup_ns)
                return -EPERM;

        root = kzalloc(sizeof(*root), GFP_KERNEL);
        if (!root)
                return -ENOMEM;

        ctx->root = root;
        init_cgroup_root(ctx);

        ret = cgroup_setup_root(root, ctx->subsys_mask);
        if (ret)
                cgroup_free_root(root);
        return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
        struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
        int ret;

        /* Check if the caller has permission to mount. */
        if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        ret = cgroup1_root_to_use(fc);
        if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
                ret = 1;        /* restart */

        mutex_unlock(&cgroup_mutex);

        if (!ret)
                ret = cgroup_do_get_tree(fc);

        if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
                struct super_block *sb = fc->root->d_sb;
                dput(fc->root);
                deactivate_locked_super(sb);
                ret = 1;
        }

        if (unlikely(ret > 0)) {
                msleep(10);
                return restart_syscall();
        }
        return ret;
}

static int __init cgroup1_wq_init(void)
{
        /*
         * Used to destroy pidlists and is kept separate to serve as the
         * flush domain.  Cap @max_active at 1 too.
         */
        cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
                                                    0, 1);
        BUG_ON(!cgroup_pidlist_destroy_wq);
        return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                if (!strcmp(token, "all")) {
                        cgroup_no_v1_mask = U16_MAX;
                        continue;
                }

                if (!strcmp(token, "named")) {
                        cgroup_no_v1_named = true;
                        continue;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;

                        cgroup_no_v1_mask |= 1 << i;
                }
        }
        return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
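
/*
 * Example boot parameters parsed above (illustrative):
 *
 *	cgroup_no_v1=memory,cpu		block v1 memory and cpu controllers
 *	cgroup_no_v1=all		block all v1 controllers
 *	cgroup_no_v1=named		block named (name=...) v1 hierarchies
 */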