#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

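/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 */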
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

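/* controllers blocked from v1 hierarchies by the cgroup_no_v1= boot param */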
static u16 cgroup_no_v1_mask;

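/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */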
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

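/* protects cgroup_root->release_agent_path; both readers and writers hold it */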
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

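/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */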
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

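/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either in the source cgroup or the target cgroup.
 */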
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty, skipping tasks
	 * that are already exiting.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

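/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks.  So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */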
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

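/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks").  We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */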
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted.  doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of pids (or tgids, for a procs file) */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

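/*
 * The following two functions handle the case where there are more pids
 * than kmalloc will comfortably give memory for; in such cases, we fall
 * back to vmalloc.  kvfree() in pidlist_free() handles both flavors.
 */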
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

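/*
 * Used to destroy all pidlists lingering waiting for destroy timer.  None
 * should be left afterwards.
 */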
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

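/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries.
 * Returns the number of unique elements.
 */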
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1.  trivial
	 * edge cases first; no work needs to be done for either.
	 */
	if (length == 0 || length == 1)
		return length;

	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}

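/*
 * The tasks and cgroup.procs files are guaranteed to be sorted, and pid
 * order differs per pid namespace, so each namespace needs its own sorted
 * list.  Pidlists can be fairly large, so allocating one per open file
 * would be wasteful; instead, a pool of pidlists keyed by cgroup and
 * namespace is shared among openers.
 */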
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

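/*
 * Find the pidlist for @type in the current task's pid namespace, creating
 * a new one if none exists yet.  Returns NULL on allocation failure.  The
 * caller must hold cgrp->pidlist_mutex.
 */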
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

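/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 */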
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += link->cset->nr_tasks;
	spin_unlock_irq(&css_set_lock);
	return count;
}

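/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */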
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0;
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

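/*
 * seq_file methods for the tasks/procs files.  The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */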
static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start).  Use a binary-search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed inbetween.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);	/* ->stop() unlocks pidlist_mutex */
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array.  If this goes off the
	 * end, we're done.
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

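/* cgroup core interface files for the legacy hierarchies */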
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }
};

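/* Display information about each subsystem and each hierarchy */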
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

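/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */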
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be kernfs_node belonging to cgroupfs and is a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

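/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence.
 * Next time it's released, we will get notified again, if it still
 * has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting until the
 * release agent task has done its job.
 */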
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}

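/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */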
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "cpuset_v2_mode")) {
			opts->flags |= CGRP_ROOT_CPUSET_V2_MODE;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup1_ssid_disabled(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems,
	 * otherwise if 'none', 'name=' and a subsystem name options were
	 * not specified, let's default to 'all'
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.remount_fs		= cgroup1_remount,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_sb_opts opts;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct dentry *dentry;
	int i, ret;
	bool new_root = false;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We could use a wait_queue for the wait but
		 * this path is super cold.  Let's just sleep a bit and retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	new_root = true;

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
				 CGROUP_SUPER_MAGIC, ns);

	/*
	 * There's a race window after we release cgroup_mutex and before
	 * allocating a superblock.  Make sure a concurrent process won't
	 * be able to re-use the root during this window by delaying the
	 * initialization of root refcnt.
	 */
	if (new_root) {
		mutex_lock(&cgroup_mutex);
		percpu_ref_reinit(&root->cgrp.self.refcnt);
		mutex_unlock(&cgroup_mutex);
	}

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb)
		deactivate_super(pinned_sb);

	return dentry;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);