1
2#ifndef _LINUX_CGROUP_H
3#define _LINUX_CGROUP_H
4
5
6
7
8
9
10
11
12#include <linux/sched.h>
13#include <linux/cpumask.h>
14#include <linux/nodemask.h>
15#include <linux/rculist.h>
16#include <linux/cgroupstats.h>
17#include <linux/fs.h>
18#include <linux/seq_file.h>
19#include <linux/kernfs.h>
20#include <linux/jump_label.h>
21#include <linux/types.h>
22#include <linux/ns_common.h>
23#include <linux/nsproxy.h>
24#include <linux/user_namespace.h>
25#include <linux/refcount.h>
26#include <linux/kernel_stat.h>
27
28#include <linux/cgroup-defs.h>
29
30#ifdef CONFIG_CGROUPS
31
32
33
34
35
36
/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)

/* internal flags; not for use outside cgroup core proper */
#define CSS_TASK_ITER_SKIPPED (1U << 16)
48
49
/* a css_task_iter should be treated as an opaque object by users */
struct css_task_iter {
	struct cgroup_subsys *ss;
	unsigned int flags;			/* CSS_TASK_ITER_* flags */

	/* current / end position in the css_set list being walked */
	struct list_head *cset_pos;
	struct list_head *cset_head;

	/* current / end position in the threaded css_set list */
	struct list_head *tcset_pos;
	struct list_head *tcset_head;

	/* current task position and the per-cset task lists being walked */
	struct list_head *task_pos;
	struct list_head *tasks_head;
	struct list_head *mg_tasks_head;	/* tasks mid-migration */
	struct list_head *dying_tasks_head;	/* tasks being released */

	/* pinned objects so the iterator survives dropping locks/RCU */
	struct css_set *cur_cset;
	struct css_set *cur_dcset;
	struct task_struct *cur_task;
	struct list_head iters_node;		/* css_set->task_iters */
};
70
71extern struct cgroup_root cgrp_dfl_root;
72extern struct css_set init_css_set;
73
74#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
75#include <linux/cgroup_subsys.h>
76#undef SUBSYS
77
78#define SUBSYS(_x) \
79 extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
80 extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
81#include <linux/cgroup_subsys.h>
82#undef SUBSYS
83
84
85
86
87
/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 *
 * Static-branch based; essentially free when the state doesn't change.
 */
#define cgroup_subsys_enabled(ss) \
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on the default
 * (v2) hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss) \
	static_branch_likely(&ss ## _on_dfl_key)
97
98bool css_has_online_children(struct cgroup_subsys_state *css);
99struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
100struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
101 struct cgroup_subsys *ss);
102struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
103 struct cgroup_subsys *ss);
104struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
105 struct cgroup_subsys *ss);
106
107struct cgroup *cgroup_get_from_path(const char *path);
108struct cgroup *cgroup_get_from_fd(int fd);
109
110int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
111int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
112
113int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
114int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
115int cgroup_rm_cftypes(struct cftype *cfts);
116void cgroup_file_notify(struct cgroup_file *cfile);
117
118int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
119int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
120int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
121 struct pid *pid, struct task_struct *tsk);
122
123void cgroup_fork(struct task_struct *p);
124extern int cgroup_can_fork(struct task_struct *p);
125extern void cgroup_cancel_fork(struct task_struct *p);
126extern void cgroup_post_fork(struct task_struct *p);
127void cgroup_exit(struct task_struct *p);
128void cgroup_release(struct task_struct *p);
129void cgroup_free(struct task_struct *p);
130
131int cgroup_init_early(void);
132int cgroup_init(void);
133
134int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
135
136
137
138
139
140struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
141 struct cgroup_subsys_state *parent);
142struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
143 struct cgroup_subsys_state *css);
144struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
145struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
146 struct cgroup_subsys_state *css);
147
148struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
149 struct cgroup_subsys_state **dst_cssp);
150struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
151 struct cgroup_subsys_state **dst_cssp);
152
153void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
154 struct css_task_iter *it);
155struct task_struct *css_task_iter_next(struct css_task_iter *it);
156void css_task_iter_end(struct css_task_iter *it);
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().  The
 * iterator only guarantees traversal integrity; it is the caller's
 * responsibility to verify the state of each visited css.
 */
#define css_for_each_child(pos, parent) \
	for ((pos) = css_next_child(NULL, (parent)); (pos); \
	     (pos) = css_next_child((pos), (parent)))
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants in pre-order.  Must be called under
 * rcu_read_lock().  @css itself is included in the iteration and is the
 * first node visited.  Only traversal integrity is guaranteed; the
 * caller must verify each visited css's state.
 */
#define css_for_each_descendant_pre(pos, css) \
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and is the last
 * node visited.
 */
#define css_for_each_descendant_post(pos, css) \
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_post((pos), (css)))
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks; @dst_css is set to the css each task
 * is being migrated to, as reported by cgroup_taskset_first/next().
 */
#define cgroup_taskset_for_each(task, dst_css, tset) \
	for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
	     (task); \
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  The trailing if/else pair
 * filters out non-leaders while keeping the macro safe against a
 * dangling-else when used as a single statement.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
	for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
	     (leader); \
	     (leader) = cgroup_taskset_next((tset), &(dst_css))) \
		if ((leader) != (leader)->group_leader) \
			; \
		else
302
303
304
305
306
307
308
309
310
311
312
313static inline void css_get(struct cgroup_subsys_state *css)
314{
315 if (!(css->flags & CSS_NO_REF))
316 percpu_ref_get(&css->refcnt);
317}
318
319
320
321
322
323
324
325
326static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
327{
328 if (!(css->flags & CSS_NO_REF))
329 percpu_ref_get_many(&css->refcnt, n);
330}
331
332
333
334
335
336
337
338
339
340
341
342
343static inline bool css_tryget(struct cgroup_subsys_state *css)
344{
345 if (!(css->flags & CSS_NO_REF))
346 return percpu_ref_tryget(&css->refcnt);
347 return true;
348}
349
350
351
352
353
354
355
356
357
358
359
360static inline bool css_tryget_online(struct cgroup_subsys_state *css)
361{
362 if (!(css->flags & CSS_NO_REF))
363 return percpu_ref_tryget_live(&css->refcnt);
364 return true;
365}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css's percpu ref has been killed (offlining started or
 * finished).  CSS_NO_REF csses (e.g. root csses) are never killed and
 * therefore never report dying.  The caller must ensure @css stays
 * accessible, e.g. by holding a reference, while performing this test.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
386
387
388
389
390
391
392
393static inline void css_put(struct cgroup_subsys_state *css)
394{
395 if (!(css->flags & CSS_NO_REF))
396 percpu_ref_put(&css->refcnt);
397}
398
399
400
401
402
403
404
405
406static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
407{
408 if (!(css->flags & CSS_NO_REF))
409 percpu_ref_put_many(&css->refcnt, n);
410}
411
/* pin @cgrp by grabbing a reference on its self css */
static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

/* try to pin @cgrp; fails once the self css has started being released */
static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

/* release a reference obtained via cgroup_get() or cgroup_tryget() */
static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}
426
427
428
429
430
431
432
433
434
435
436
437
438
439
/*
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Returns @task's css_set.  Accepted access conditions: RCU read lock,
 * cgroup_mutex, css_set_lock, @task being PF_EXITING, or the caller-
 * supplied @__c.  Without CONFIG_PROVE_RCU only plain RCU dereference
 * checking applies.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
	rcu_dereference_check((task)->cgroups, \
		lockdep_is_held(&cgroup_mutex) || \
		lockdep_is_held(&css_set_lock) || \
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c) \
	task_css_set_check((task), (__c))->subsys[(subsys_id)]
464
465
466
467
468
469
470
/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check() for the applicable access conditions.
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check() for the applicable access conditions.
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}
488
489
490
491
492
493
494
495
496
497
/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on it and return it.  This function is guaranteed to return
 * a valid css: it retries until css_tryget() succeeds on @task's current
 * css.  The returned css must be put with css_put().
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css;
		 * for such a task, css_tryget_online() would keep failing
		 * and this loop would never terminate.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
519
520
521
522
523
524
525
526
527
/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem
 * by comparing against init_css_set.  Passes true as the extra check
 * condition, so it may be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

/* the cgroup owning @task's css for @subsys_id */
static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

/* @task's cgroup on the default (v2) hierarchy */
static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}
544
545static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
546{
547 struct cgroup_subsys_state *parent_css = cgrp->self.parent;
548
549 if (parent_css)
550 return container_of(parent_css, struct cgroup, self);
551 return NULL;
552}
553
554
555
556
557
558
559
560
561
562
/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  A cgroup is
 * considered its own descendant (equal levels and matching id pass the
 * check below).  Returns false if the two are on different hierarchies
 * or if @cgrp sits above @ancestor's level.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	/* O(1): each cgroup records the ids of all its ancestors by level */
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
570
571
572
573
574
575
576
577
578
579
580
581
582static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
583 int ancestor_level)
584{
585 if (cgrp->level < ancestor_level)
586 return NULL;
587 while (cgrp && cgrp->level > ancestor_level)
588 cgrp = cgroup_parent(cgrp);
589 return cgrp;
590}
591
592
593
594
595
596
597
598
599
600
/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests that @task's default-hierarchy cgroup is a descendant of
 * @ancestor.  The caller's responsibility is to ensure that both @task's
 * css_set and @ancestor are valid.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization; the result can change at any point in time */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	/* non-zero if @cgrp or any descendant has live member tasks */
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns the inode number of @cgrp's backing kernfs node */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}
621
622
/* cft/css accessors for cftype->write() and kernfs operations */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/*
 * cft/css accessors for cftype->seq_*() operations.  A cgroup seq_file's
 * private data is the kernfs_open_file it was created from.
 */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}
640
641
642
643
644
645
/*
 * Name / path handling functions.  All are thin wrappers around the
 * kernfs counterparts and inherit their calling-context rules.
 */
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

/* return the embedded pressure-stall-information state of @cgrp */
static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads; keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

/* return the kernfs node id backing @cgrp, used for path lookups */
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}
694
695void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
696 char *buf, size_t buflen);
697#else
698
/*
 * !CONFIG_CGROUPS: no-op / NULL-returning stubs so callers don't need
 * #ifdefs.  Forward declarations keep the pointer-taking signatures valid.
 */
struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

/* without cgroups every task trivially satisfies any hierarchy test */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
744#endif
745
746#ifdef CONFIG_CGROUPS
747
748
749
750void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
751void cgroup_rstat_flush(struct cgroup *cgrp);
752void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
753void cgroup_rstat_flush_hold(struct cgroup *cgrp);
754void cgroup_rstat_flush_release(void);
755
756
757
758
759#ifdef CONFIG_CGROUP_CPUACCT
760void cpuacct_charge(struct task_struct *tsk, u64 cputime);
761void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
762#else
763static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
764static inline void cpuacct_account_field(struct task_struct *tsk, int index,
765 u64 val) {}
766#endif
767
768void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
769void __cgroup_account_cputime_field(struct cgroup *cgrp,
770 enum cpu_usage_stat index, u64 delta_exec);
771
/*
 * Account @delta_exec of cputime to @task's cgroup on the default
 * hierarchy (rstat) as well as to cpuacct.  The root cgroup (no parent)
 * is skipped for the rstat side.
 */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	/* RCU protects the task -> dfl cgroup association */
	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

/*
 * Same as cgroup_account_cputime() but for a specific cpu_usage_stat
 * field (user/system/etc.).
 */
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}
800
801#else
802
/* !CONFIG_CGROUPS: cputime accounting becomes a no-op */
static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}
808
809#endif
810
811
812
813
814
815#ifdef CONFIG_SOCK_CGROUP_DATA
816
817#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
818extern spinlock_t cgroup_sk_update_lock;
819#endif
820
821void cgroup_sk_alloc_disable(void);
822void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
823void cgroup_sk_free(struct sock_cgroup_data *skcd);
824
/*
 * Return the cgroup associated with a socket's cgroup data, falling back
 * to the default root cgroup when no specific cgroup pointer is stored.
 */
static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	/*
	 * Low bit set means @val does not carry a cgroup pointer (it is
	 * repurposed by net_prio/net_cls — see sock_cgroup_data in
	 * cgroup-defs.h); report the default root instead.
	 */
	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else

/* !CONFIG_SOCK_CGROUP_DATA: socket cgroup tracking compiled out */
static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
849
850#endif
851
/* cgroup namespace: see copy_cgroup_ns() and cgroup_path_ns() */
struct cgroup_namespace {
	refcount_t count;		/* managed by get/put_cgroup_ns() */
	struct ns_common ns;
	struct user_namespace *user_ns;	/* owning user namespace */
	struct ucounts *ucounts;
	struct css_set *root_cset;	/* css_set acting as the ns root */
};
859
860extern struct cgroup_namespace init_cgroup_ns;
861
862#ifdef CONFIG_CGROUPS
863
864void free_cgroup_ns(struct cgroup_namespace *ns);
865
866struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
867 struct user_namespace *user_ns,
868 struct cgroup_namespace *old_ns);
869
870int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
871 struct cgroup_namespace *ns);
872
873#else
874
static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }

/* with cgroups disabled, "copying" a namespace just reuses the old one */
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif

/* NULL-safe reference counting for cgroup namespaces */
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}
896
897#ifdef CONFIG_CGROUPS
898
899void cgroup_enter_frozen(void);
900void cgroup_leave_frozen(bool always_leave);
901void cgroup_update_frozen(struct cgroup *cgrp);
902void cgroup_freeze(struct cgroup *cgrp, bool freeze);
903void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
904 struct cgroup *dst);
905
/**
 * cgroup_task_freeze - whether @task should be frozen by the cgroup freezer
 * @task: target task
 *
 * Kernel threads are exempt from cgroup freezing.  For userspace tasks,
 * report whether the task's default-hierarchy cgroup currently has
 * CGRP_FREEZE set.  The result is a snapshot; it can change as soon as
 * the RCU read section is left.
 */
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

/* whether @task has actually entered the frozen state */
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}
924
925#else
926
/* !CONFIG_CGROUPS: freezer participation compiled out */
static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif

#ifdef CONFIG_CGROUP_BPF
/* pin/unpin @cgrp's bpf attachment state via its percpu ref */
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
955
956#endif
957
958#endif
959