/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Core control-group API: subsystem-state refcounting, task and
 * descendant iteration, namespace handling and accounting hooks.
 */
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN 1
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS (1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED (1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) \
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key; \
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question, prefer to use SUBSYS macros generated keys
 */
#define cgroup_subsys_enabled(ss) \
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on the default
 *			  hierarchy
 * @ss: subsystem in question, prefer to use SUBSYS macros generated keys
 */
#define cgroup_subsys_on_dfl(ss) \
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */
struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
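
/*
 * Illustrative sketch (not part of this header): the usual pattern for
 * walking every process attached to a css.  "my_css" and do_something()
 * are hypothetical, caller-owned names.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(my_css, CSS_TASK_ITER_PROCS, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		do_something(task);
 *	css_task_iter_end(&it);
 */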

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * A child css which hasn't finished ->css_online() or has already started
 * ->css_offline() may show up during traversal; it is each subsystem's
 * responsibility to synchronize against on/offlining.  The RCU read lock
 * may be dropped temporarily between iterations provided @pos is kept
 * accessible, for example by bumping its reference count.
 */
#define css_for_each_child(pos, parent) \
	for ((pos) = css_next_child(NULL, (parent)); (pos); \
	     (pos) = css_next_child((pos), (parent)))

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants, including @css itself, in pre-order (parents
 * before children).  Must be called under rcu_read_lock().
 *
 * As with css_for_each_child(), csses that are being created or destroyed
 * may show up or be missed during traversal; subsystems needing stronger
 * guarantees must synchronize against ->css_online()/->css_offline()
 * themselves.  The RCU read lock may be dropped temporarily between
 * iterations as long as @pos stays accessible.
 */
#define css_for_each_descendant_pre(pos, css) \
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_pre((pos), (css)))
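
/*
 * Illustrative sketch (not from this header): a pre-order walk under RCU,
 * pinning each descendant that is still online before working on it.
 * "root_css" and process_css() are hypothetical.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css) {
 *		if (!css_tryget_online(pos))
 *			continue;
 *		process_css(pos);
 *		css_put(pos);
 *	}
 *	rcu_read_unlock();
 */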

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs a post-order walk
 * (children before parents), visiting @css last.  Must be called under
 * rcu_read_lock(), with the same on/offlining caveats.
 */
#define css_for_each_descendant_post(pos, css) \
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate a cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes; iteration is not in any particular order.  Intended for use
 * from cgroup_subsys migration callbacks such as ->attach().
 */
#define cgroup_taskset_for_each(task, dst_css, tset) \
	for ((task) = cgroup_taskset_first((tset), &(dst_css)); \
	     (task); \
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
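
/*
 * Illustrative sketch (hypothetical ->attach() callback, not part of this
 * header): walking the tasks being migrated together with each task's
 * destination css.  my_attach() and update_task_state() are made-up names.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			update_task_state(task, css);
 *	}
 */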

/**
 * cgroup_taskset_for_each_leader - iterate threadgroup leaders in a taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate the threadgroup leaders of @tset.  For single-task migrations,
 * @tset may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset) \
	for ((leader) = cgroup_taskset_first((tset), &(dst_css)); \
	     (leader); \
	     (leader) = cgroup_taskset_next((tset), &(dst_css))) \
		if ((leader) != (leader)->group_leader) \
			; \
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to be obtained
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it has already reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller needs to ensure that @css is accessible but doesn't
 * have to be holding a reference on it - IOW, RCU protected access is good
 * enough.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller needs to ensure
 * that @css is accessible - IOW, RCU protected access is good enough.
 * Returns %true if a reference count was successfully obtained; %false
 * otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
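
/*
 * Illustrative sketch (not from this header): pinning a css found under
 * RCU before using it outside the read-side critical section.
 * lookup_some_css() and use_css() are hypothetical.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = lookup_some_css();
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		use_css(css);
 *		css_put(css);
 *	}
 */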

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases the ->css_online() and ->css_offline() callbacks are enough;
 * however, the actual offline operations are RCU delayed and this test
 * also returns %true while @css is merely scheduled to be offlined.  This
 * is useful when the use case requires synchronous behavior with respect
 * to cgroup removal.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected and may only be modified with both
 * cgroup_mutex and css_set_lock held.  This macro verifies that the caller
 * is inside a proper critical section and returns @task's css_set.
 * Additional allowed conditions can be specified via @__c.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
	rcu_dereference_check((task)->cgroups, \
			      lockdep_is_held(&cgroup_mutex) || \
			      lockdep_is_held(&css_set_lock) || \
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c) \
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c) \
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on it and return it.  This function is guaranteed to return a
 * valid css; the returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here.  A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such a task calls this function, css_tryget_online()
		 * would keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}
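
/*
 * Illustrative sketch (not from this header): grabbing and releasing a
 * task's css for one subsystem.  cpu_cgrp_id stands in for any subsystem
 * ID generated from cgroup_subsys.h (and is only available when that
 * subsystem is configured); inspect_css() is hypothetical.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(current, cpu_cgrp_id);
 *	inspect_css(css);
 *	css_put(css);
 */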

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find the ancestor of @cgrp at the specified level counted from the root
 * and return a pointer to it.  Return NULL if @cgrp doesn't have an
 * ancestor at @ancestor_level.  Safe to call as long as @cgrp is
 * accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of
 * @ancestor.  It follows the same rules as cgroup_is_descendant() and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization; the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */
static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root cgroup
	 * even if cgroup_kthread_ready() hasn't been invoked yet.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization; allow it to be migrated
	 * out of the root cgroup from now on.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS

/*
 * cgroup scalable recursive statistics (rstat).
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	/* low bit set: @skcd carries data rather than a cgroup pointer */
	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */