1
2#ifndef _LINUX_CGROUP_H
3#define _LINUX_CGROUP_H
4
5
6
7
8
9
10
11
12#include <linux/sched.h>
13#include <linux/cpumask.h>
14#include <linux/nodemask.h>
15#include <linux/rculist.h>
16#include <linux/cgroupstats.h>
17#include <linux/fs.h>
18#include <linux/seq_file.h>
19#include <linux/kernfs.h>
20#include <linux/jump_label.h>
21#include <linux/types.h>
22#include <linux/ns_common.h>
23#include <linux/nsproxy.h>
24#include <linux/user_namespace.h>
25#include <linux/refcount.h>
26#include <linux/kernel_stat.h>
27
28#include <linux/cgroup-defs.h>
29
30struct kernel_clone_args;
31
32#ifdef CONFIG_CGROUPS
33
34
35
36
37
38
/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the geometric center of
 * MIN and MAX (sqrt(1 * 10000) == 100) and allows 100x to be expressed in
 * both directions.
 */
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)

/* internal flag — set by the iterator core, not by callers */
#define CSS_TASK_ITER_SKIPPED (1U << 16)
50
51
/* a css_task_iter should be treated as an opaque object by users */
struct css_task_iter {
	struct cgroup_subsys *ss;	/* subsystem being iterated */
	unsigned int flags;		/* CSS_TASK_ITER_* flags */

	/* current position in the css_set list */
	struct list_head *cset_pos;
	struct list_head *cset_head;

	/* current position in the threaded css_set list */
	struct list_head *tcset_pos;
	struct list_head *tcset_head;

	/* current position in the task list of @cur_cset */
	struct list_head *task_pos;

	struct list_head *cur_tasks_head;
	struct css_set *cur_cset;	/* css_set currently being walked */
	struct css_set *cur_dcset;	/* its domain css_set */
	struct task_struct *cur_task;	/* task last returned by the iterator */
	struct list_head iters_node;	/* css_set->task_iters */
};
70
71extern struct cgroup_root cgrp_dfl_root;
72extern struct css_set init_css_set;
73
/*
 * Declare the per-controller cgroup_subsys instances.  The controller list
 * itself is stamped out from <linux/cgroup_subsys.h> via the SUBSYS() macro.
 */
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/*
 * Per-controller static keys: one tracking whether the controller is
 * enabled at all and one tracking whether it is active on the default
 * hierarchy.  Used by the fast-path test macros below.
 */
#define SUBSYS(_x) \
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss) \
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss) \
	static_branch_likely(&ss ## _on_dfl_key)
97
/*
 * Core API: css lookup, task attachment, control-file registration,
 * stat reporting and fork/exit hooks.  Implemented in kernel/cgroup/.
 */
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

/* cftype (control file) registration and notification */
int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

/* hooks called by the core kernel around fork and exit */
void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/* parse a decimal string such as "12.34" into @v scaled by 10^@dec_shift */
int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
138
139
140
141
142
/*
 * Iteration primitives used by the css_for_each_*() and
 * cgroup_taskset_for_each*() macros below.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

/* task iteration over a css; @flags is a mask of CSS_TASK_ITER_* */
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walks @parent's children via css_next_child().  Synchronization rules
 * (RCU vs. object lifetime) follow css_next_child() — see its definition.
 */
#define css_for_each_child(pos, parent) \
	for ((pos) = css_next_child(NULL, (parent)); (pos); \
	     (pos) = css_next_child((pos), (parent)))
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Starts from css_next_descendant_pre(NULL, @css) and keeps advancing in
 * pre-order.  Synchronization rules follow css_next_descendant_pre().
 */
#define css_for_each_descendant_pre(pos, css) \
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_pre((pos), (css)))
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Like css_for_each_descendant_pre() but performs post-order traversal
 * via css_next_descendant_post().
 */
#define css_for_each_descendant_post(pos, css) \
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_post((pos), (css)))
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.  On each iteration @dst_css is set to the css the task is
 * being migrated to (as reported by cgroup_taskset_first/next()).
 */
#define cgroup_taskset_for_each(task, dst_css, tset) \
	for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
	     (task); \
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
288
289
290
291
292
293
294
295
296
297
/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterates @tset but skips every task which is not a threadgroup leader.
 * The trailing "if (x != leader) ; else" construct filters non-leaders
 * while keeping the macro usable as a normal single-statement loop header
 * (the caller's body binds to the dangling else).
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
	for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
	     (leader); \
	     (leader) = cgroup_taskset_next((tset), &(dst_css))) \
		if ((leader) != (leader)->group_leader) \
			; \
		else
305
306
307
308
309
/*
 * cgroup_id - the unique id of a cgroup, taken from its backing kernfs node
 */
static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}
314
315
316
317
318
319
320
321static inline void css_get(struct cgroup_subsys_state *css)
322{
323 if (!(css->flags & CSS_NO_REF))
324 percpu_ref_get(&css->refcnt);
325}
326
327
328
329
330
331
332
333
334static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
335{
336 if (!(css->flags & CSS_NO_REF))
337 percpu_ref_get_many(&css->refcnt, n);
338}
339
340
341
342
343
344
345
346
347
348
349
350
351static inline bool css_tryget(struct cgroup_subsys_state *css)
352{
353 if (!(css->flags & CSS_NO_REF))
354 return percpu_ref_tryget(&css->refcnt);
355 return true;
356}
357
358
359
360
361
362
363
364
365
366
367
368static inline bool css_tryget_online(struct cgroup_subsys_state *css)
369{
370 if (!(css->flags & CSS_NO_REF))
371 return percpu_ref_tryget_live(&css->refcnt);
372 return true;
373}
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390static inline bool css_is_dying(struct cgroup_subsys_state *css)
391{
392 return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
393}
394
395
396
397
398
399
400
401static inline void css_put(struct cgroup_subsys_state *css)
402{
403 if (!(css->flags & CSS_NO_REF))
404 percpu_ref_put(&css->refcnt);
405}
406
407
408
409
410
411
412
413
414static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
415{
416 if (!(css->flags & CSS_NO_REF))
417 percpu_ref_put_many(&css->refcnt, n);
418}
419
/* reference counting on a cgroup goes through its self css */
static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}
434
435
436
437
438
439
440
441
442
443
444
445
446
447
/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected.  Under CONFIG_PROVE_RCU the lockdep
 * check additionally accepts cgroup_mutex, css_set_lock or an exiting
 * task (PF_EXITING) as valid access conditions, on top of @__c.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
	rcu_dereference_check((task)->cgroups, \
		lockdep_is_held(&cgroup_mutex) || \
		lockdep_is_held(&css_set_lock) || \
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
	rcu_dereference((task)->cgroups)
#endif
460
461
462
463
464
465
466
467
468
469
/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Returns the css for the (@task, @subsys_id) pair; the caller is
 * responsible for ensuring that the css stays accessible.
 */
#define task_css_check(task, subsys_id, __c) \
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check() for the conditions under which this is safe.
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check() for the access rules.
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
496
497
498
499
500
501
502
503
504
505
/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) pair, take a reference on it
 * and return it.  The returned css is guaranteed to remain valid until
 * the caller releases it with css_put().
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * css_tryget() can fail if the css just got killed while
		 * @task is still associated with it; retrying under RCU
		 * will eventually observe the task's updated css.
		 * NOTE(review): presumably css_tryget_online() is avoided
		 * here because an exiting task may stay associated with an
		 * offline css indefinitely — confirm against kernel docs.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
527
528
529
530
531
532
533
534
535
/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Tests whether @task's css for @subsys_id is the init (root) css.  The
 * "true" extra condition relaxes the RCU check; see task_css_check().
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

/* cgroup @task belongs to for subsystem @subsys_id */
static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

/* cgroup @task belongs to on the default hierarchy */
static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}
552
553static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
554{
555 struct cgroup_subsys_state *parent_css = cgrp->self.parent;
556
557 if (parent_css)
558 return container_of(parent_css, struct cgroup, self);
559 return NULL;
560}
561
562
563
564
565
566
567
568
569
570
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  A cgroup is
 * considered its own descendant.  Answered in O(1) by comparing the
 * cached ancestor id at @ancestor's level rather than walking parents.
 * Cgroups on different hierarchy roots are never related.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}
578
579
580
581
582
583
584
585
586
587
588
589
590static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
591 int ancestor_level)
592{
593 if (cgrp->level < ancestor_level)
594 return NULL;
595 while (cgrp && cgrp->level > ancestor_level)
596 cgrp = cgroup_parent(cgrp);
597 return cgrp;
598}
599
600
601
602
603
604
605
606
607
608
/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default-hierarchy cgroup is a descendant of
 * @ancestor.  The access rules of task_css_set() apply.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization; the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}
623
624
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
648
649
650
651
652
653
/*
 * Name / path helpers, all forwarding to the cgroup's kernfs node.
 * Return values and truncation behavior follow kernfs_name()/kernfs_path().
 */
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

/* PSI (pressure stall information) group embedded in @cgrp */
static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}
678
static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads; keep it in the root so
	 * that new kthreads are guaranteed to stay in the root until
	 * initialization is finished.  The flag is cleared per-kthread by
	 * cgroup_kthread_ready() below.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * moved it to the appropriate cgroup; allow migration again.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
699#else
700
/* !CONFIG_CGROUPS: no-op stubs so callers need no #ifdefs */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

/* with cgroups disabled every task is trivially "under" any hierarchy */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
746#endif
747
748#ifdef CONFIG_CGROUPS
749
750
751
/*
 * cgroup scalable recursive statistics (rstat).
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.  cpuacct charging is stubbed out when the
 * cpuacct controller is not configured.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif
769
770void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
771void __cgroup_account_cputime_field(struct cgroup *cgrp,
772 enum cpu_usage_stat index, u64 delta_exec);
773
/*
 * Charge @delta_exec of cputime to @task's cgroup.  cpuacct is charged
 * unconditionally; the default-hierarchy cgroup is charged only if it is
 * not the root (cgroup_parent() != NULL).  The cgroup lookup is done
 * under rcu_read_lock() since the task's css_set is RCU protected.
 */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}
787
/*
 * Same as cgroup_account_cputime() but charges a specific cpu_usage_stat
 * field (@index) instead of the aggregate cputime.
 */
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}
802
803#else
804
/* !CONFIG_CGROUPS: cputime accounting becomes a no-op */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}
810
811#endif
812
813
814
815
816
/*
 * sock->sk_cgrp_data handling.  For details, see sock_cgroup_data in
 * cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 * NOTE(review): bit 0 of @skcd->val appears to flag that the word
	 * has been switched to prio/classid mode and no longer holds a
	 * cgroup pointer, in which case the default root cgroup is
	 * returned — confirm against sock_cgroup_data's definition.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	/* a zero value also falls back to the default root cgroup */
	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}
846
847#else
848
/* !CONFIG_SOCK_CGROUP_DATA: socket cgroup tracking is a no-op */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif

/* cgroup namespace: scopes cgroup path visibility to @root_cset */
struct cgroup_namespace {
	refcount_t		count;		/* see get/put_cgroup_ns() */
	struct ns_common	ns;
	struct user_namespace	*user_ns;	/* owning user namespace */
	struct ucounts		*ucounts;
	struct css_set          *root_cset;	/* css_set acting as ns root */
};
861
862extern struct cgroup_namespace init_cgroup_ns;
863
864#ifdef CONFIG_CGROUPS
865
void free_cgroup_ns(struct cgroup_namespace *ns);

/* create or reuse a cgroup namespace during clone/unshare */
struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

/* like cgroup_path() but relative to @ns's root */
int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

/* NULL-tolerant reference counting on a cgroup namespace */
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}
898
899#ifdef CONFIG_CGROUPS
900
/* cgroup2 freezer interface, implemented in kernel/cgroup/freezer.c */
void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

/*
 * cgroup_task_freeze - whether @task should be frozen
 *
 * Kernel threads are never frozen by the cgroup freezer.  Otherwise the
 * answer is the CGRP_FREEZE bit of the task's default-hierarchy cgroup,
 * sampled under rcu_read_lock() as the css_set is RCU protected.
 */
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

/* whether @task has already reached the frozen state */
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}
926
927#else
928
/* !CONFIG_CGROUPS: freezer is a no-op and no task is ever frozen */
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}
939
940#endif
941
942#ifdef CONFIG_CGROUP_BPF
/* reference counting for the bpf programs attached to @cgrp */
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
957
958#endif
959
960#endif
961