#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS
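/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */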
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
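/*
 * Flags for css_task_iter_start():
 *
 * CSS_TASK_ITER_PROCS    - walk only threadgroup leaders
 * CSS_TASK_ITER_THREADED - walk all threaded css_sets in the domain
 * CSS_TASK_ITER_SKIPPED  - internal flag, not meant for users
 */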
#define CSS_TASK_ITER_PROCS		(1U << 0)
#define CSS_TASK_ITER_THREADED		(1U << 1)

#define CSS_TASK_ITER_SKIPPED		(1U << 16)
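/* a css_task_iter should be treated as an opaque object */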
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS
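/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question, should have the _cgrp_subsys suffix
 */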
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)
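/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question, should have the _cgrp_subsys suffix
 */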
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
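/*
 * cgroup_parse_float() parses a decimal string into an integer scaled by
 * 10^@dec_shift and returns 0 on success.  An illustrative sketch only
 * (error handling elided, values assume the usual fixed-point convention):
 *
 *	s64 v;
 *
 *	if (!cgroup_parse_float("12.34", 2, &v))
 *		pr_debug("parsed %lld\n", v);	// v == 1234
 */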
/*
 * Iteration helpers and macros.  All must be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void cgroup_enable_task_cg_lists(void);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
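/*
 * A minimal usage sketch of the task iterator (illustrative only; the
 * per-task callback name is hypothetical):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		handle_process(task);	// hypothetical per-process work
 *	css_task_iter_end(&it);
 */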
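/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 */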
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
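/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants, @css itself included, in pre-order.  Must be
 * called under rcu_read_lock().  A css which hasn't finished ->css_online()
 * or has already started ->css_offline() may show up during the walk; it is
 * each subsystem's responsibility to filter those out with its own
 * synchronization.
 */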
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
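/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead; @css itself is visited last.  Must be called under
 * rcu_read_lock().
 */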
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
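/**
 * cgroup_taskset_for_each - iterate a cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.  @dst_css is set to the css the current task is being
 * migrated to, which is what migration callbacks such as ->attach()
 * typically need to know.
 */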
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
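/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any group leaders even if it contains tasks.
 */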
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */
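/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */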
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless its refcount has already reached zero
 * and it is being released.  This function doesn't care whether @css is on
 * or offline.  The caller needs to ensure that @css is accessible but does
 * not have to be holding a reference on it - IOW, RCU protected access is
 * good enough.  Returns %true if a reference was obtained, %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}
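/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css only if it's online.  The caller needs to
 * ensure that @css is accessible but does not have to be holding a
 * reference on it - IOW, RCU protected access is good enough.  Returns
 * %true if a reference was obtained, %false otherwise.
 */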
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, css_tryget_online() and css_put() should be used instead.
 * This function is primarily useful when the caller synchronizes against
 * the offlining by other means and only wants to skip unnecessary work.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() or css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() or css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}
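/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * cgroup_mutex, and can only be modified while holding both cgroup_mutex
 * and css_set_lock while the task is alive.  This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */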
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, take a reference
 * on it and return it.  This function is guaranteed to return a valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such a task calls this function, css_tryget_online()
		 * would keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}
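/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */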
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find the ancestor of @cgrp at @ancestor_level, where the root cgroup is
 * level 0.  Returns NULL if @cgrp has no ancestor at that level.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant of
 * @ancestor.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}
/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->id.ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads; keep it in the root cgroup
	 * and make it immune to cgroup migration until it is ready.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread has finished initialization and may now be migrated
	 * to other cgroups.
	 */
	current->no_cgroup_migration = 0;
}

static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return &cgrp->kn->id;
}

void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
				char *buf, size_t buflen);
#else	/* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
					      char *buf, size_t buflen) {}
#endif	/* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS

/*
 * cgroup scalable recursive statistics (rstat).
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else	/* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif	/* CONFIG_CGROUPS */
/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else	/* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif	/* CONFIG_SOCK_CGROUP_DATA */
struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else	/* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif	/* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}
#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else	/* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif	/* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else	/* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif	/* CONFIG_CGROUP_BPF */

#endif	/* _LINUX_CGROUP_H */