#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
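
/*
 * Define 'struct task_struct' and provide the main scheduler
 * ABIs, structures and data types:
 */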
#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <asm/kmap_size.h>

struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct bpf_local_storage;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct io_uring_task;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
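
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->__state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */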
#define TASK_RUNNING 0x0000
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
#define __TASK_STOPPED 0x0004
#define __TASK_TRACED 0x0008

#define EXIT_DEAD 0x0010
#define EXIT_ZOMBIE 0x0020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)

#define TASK_PARKED 0x0040
#define TASK_DEAD 0x0080
#define TASK_WAKEKILL 0x0100
#define TASK_WAKING 0x0200
#define TASK_NOLOAD 0x0400
#define TASK_NEW 0x0800
#define TASK_STATE_MAX 0x1000
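
/* Convenience macros for the sake of set_current_state: */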
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
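
/* Convenience macros for the sake of wake_up(): */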
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
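
/* get_task_state(): */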
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                     __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
                     TASK_PARKED)

#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)

#define task_is_traced(task) ((READ_ONCE((task)->__state) & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((READ_ONCE((task)->__state) & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((READ_ONCE((task)->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
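
/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */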
#define is_special_task_state(state) \
        ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value) \
        do { \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_; \
                WRITE_ONCE(current->__state, (state_value)); \
        } while (0)

#define set_current_state(state_value) \
        do { \
                WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_; \
                smp_store_mb(current->__state, (state_value)); \
        } while (0)

#define set_special_state(state_value) \
        do { \
                unsigned long flags; \
                WARN_ON_ONCE(!is_special_task_state(state_value)); \
                raw_spin_lock_irqsave(&current->pi_lock, flags); \
                current->task_state_change = _THIS_IP_; \
                WRITE_ONCE(current->__state, (state_value)); \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
        } while (0)
#else
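
/*
 * set_current_state() includes a barrier so that the write of
 * current->__state is correctly serialised wrt the caller's subsequent
 * test of whether to actually sleep. The typical usage pattern (a sketch)
 * looks like:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (CONDITION)
 *         break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock)
 * then use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   CONDITION = 1;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state()/try_to_wake_up() executes a full memory barrier
 * before accessing p->__state.
 */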
#define __set_current_state(state_value) \
        WRITE_ONCE(current->__state, (state_value))

#define set_current_state(state_value) \
        smp_store_mb(current->__state, (state_value))
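
/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */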
#define set_special_state(state_value) \
        do { \
                unsigned long flags; \
                raw_spin_lock_irqsave(&current->pi_lock, flags); \
                WRITE_ONCE(current->__state, (state_value)); \
                raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
        } while (0)

#endif

#define get_current_state() READ_ONCE(current->__state)
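
/* Task command name length: */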
#define TASK_COMM_LEN 16

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
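
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock:  protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */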
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        u64 utime;
        u64 stime;
        raw_spinlock_t lock;
#endif
};
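
/*
 * vtime_state - context tracked by a struct vtime:
 * VTIME_INACTIVE: task is sleeping, or runs on a CPU with vtime inactive;
 * VTIME_IDLE:     task runs in idle context;
 * VTIME_SYS:      task runs in kernel context;
 * VTIME_USER:     task runs in user context;
 * VTIME_GUEST:    task runs as a guest vCPU.
 */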
enum vtime_state {
        VTIME_INACTIVE = 0,
        VTIME_IDLE,
        VTIME_SYS,
        VTIME_USER,
        VTIME_GUEST,
};

struct vtime {
        seqcount_t seqcount;
        unsigned long long starttime;
        enum vtime_state state;
        unsigned int cpu;
        u64 utime;
        u64 stime;
        u64 gtime;
};
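
/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN: Minimum utilization
 * @UCLAMP_MAX: Maximum utilization
 * @UCLAMP_CNT: Utilization clamp constraints count
 */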
enum uclamp_id {
        UCLAMP_MIN = 0,
        UCLAMP_MAX,
        UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif
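
/*
 * struct sched_info - scheduling statistics, gathered when
 * CONFIG_SCHED_INFO is set:
 * @pcount:       # of times we have run on this CPU
 * @run_delay:    time spent waiting on a runqueue
 * @last_arrival: when we last ran on a CPU
 * @last_queued:  when we were last queued to run
 */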
struct sched_info {
#ifdef CONFIG_SCHED_INFO
        unsigned long pcount;

        unsigned long long run_delay;

        unsigned long long last_arrival;

        unsigned long long last_queued;
#endif
};
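
/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */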
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
        unsigned long weight;
        u32 inv_weight;
};
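
/*
 * struct util_est - Estimation utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Supports data structures to track an EWMA of a FAIR task's utilization.
 * New samples are added to the moving average each time a task completes
 * an activation, with the sample's weight chosen so that the EWMA is
 * relatively insensitive to transient changes of the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and
 * cpus: for a task it is the task's util_avg at last dequeue time; for a
 * cfs_rq it is the sum of util_est.enqueued of each RUNNABLE task on it.
 */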
struct util_est {
        unsigned int enqueued;
        unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
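
/*
 * The load/runnable/util_avg fields accumulate an infinite geometric
 * series (see __update_load_avg() in kernel/sched/pelt.c). As a sketch:
 *
 *   load_avg     = runnable% * scale_load_down(load)
 *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *   util_avg     = running%  * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that the entity is runnable on a
 * runqueue and running% the time ratio that it is actually running.
 * The *_sum fields carry the decayed sums the averages are derived from;
 * all of the above metrics are unsigned and saturate.
 */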
struct sched_avg {
        u64 last_update_time;
        u64 load_sum;
        u64 runnable_sum;
        u32 util_sum;
        u32 period_contrib;
        unsigned long load_avg;
        unsigned long runnable_avg;
        unsigned long util_avg;
        struct util_est util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
        u64 wait_start;
        u64 wait_max;
        u64 wait_count;
        u64 wait_sum;
        u64 iowait_count;
        u64 iowait_sum;

        u64 sleep_start;
        u64 sleep_max;
        s64 sum_sleep_runtime;

        u64 block_start;
        u64 block_max;
        u64 exec_max;
        u64 slice_max;

        u64 nr_migrations_cold;
        u64 nr_failed_migrations_affine;
        u64 nr_failed_migrations_running;
        u64 nr_failed_migrations_hot;
        u64 nr_forced_migrations;

        u64 nr_wakeups;
        u64 nr_wakeups_sync;
        u64 nr_wakeups_migrate;
        u64 nr_wakeups_local;
        u64 nr_wakeups_remote;
        u64 nr_wakeups_affine;
        u64 nr_wakeups_affine_attempts;
        u64 nr_wakeups_passive;
        u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
        struct load_weight load;
        struct rb_node run_node;
        struct list_head group_node;
        unsigned int on_rq;

        u64 exec_start;
        u64 sum_exec_runtime;
        u64 vruntime;
        u64 prev_sum_exec_runtime;

        u64 nr_migrations;

        struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
        int depth;
        struct sched_entity *parent;
        struct cfs_rq *cfs_rq;
        struct cfs_rq *my_q;
        unsigned long runnable_weight;
#endif

#ifdef CONFIG_SMP
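        /*
         * Per entity load average tracking.
         *
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */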
        struct sched_avg avg;
#endif
};

struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned long watchdog_stamp;
        unsigned int time_slice;
        unsigned short on_rq;
        unsigned short on_list;

        struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity *parent;
        struct rt_rq *rt_rq;
        struct rt_rq *my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
        struct rb_node rb_node;
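
        /*
         * Original scheduling parameters. Copied here from sched_attr
         * during sched_setattr(), they will remain the same until
         * the next sched_setattr().
         */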
        u64 dl_runtime;
        u64 dl_deadline;
        u64 dl_period;
        u64 dl_bw;
        u64 dl_density;
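
        /*
         * Actual scheduling parameters. Initialized with the values above,
         * they are continuously updated during task execution. Note that
         * the remaining runtime could be < 0 in case we are in overrun.
         */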
        s64 runtime;
        u64 deadline;
        unsigned int flags;
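
        /*
         * Some bool flags:
         *
         * @dl_throttled tells if we exhausted the runtime. If so, the
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
         * @dl_yielded tells if task gave up the CPU before consuming
         * all its available runtime during the last job.
         *
         * @dl_non_contending tells if the task is inactive while still
         * contributing to the active utilization. In other words, it
         * indicates if the inactive timer has been armed and its handler
         * has not been executed yet. This flag is useful to avoid race
         * conditions between the inactive timer handler and the wakeup
         * code.
         *
         * @dl_overrun tells if the task asked to be informed about runtime
         * overruns.
         */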
        unsigned int dl_throttled : 1;
        unsigned int dl_yielded : 1;
        unsigned int dl_non_contending : 1;
        unsigned int dl_overrun : 1;
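
        /*
         * Bandwidth enforcement timer. Each -deadline task has its
         * own bandwidth to be enforced, thus we need one timer per task.
         */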
        struct hrtimer dl_timer;
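
        /*
         * Inactive timer, responsible for decreasing the active utilization
         * at the "0-lag time". When a -deadline task blocks, it contributes
         * to GRUB's active utilization until the "0-lag time", hence a
         * timer is needed to decrease the active utilization at the correct
         * time.
         */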
        struct hrtimer inactive_timer;

#ifdef CONFIG_RT_MUTEXES
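        /*
         * Priority Inheritance. When a DEADLINE scheduling entity is boosted
         * pi_se points to the donor, otherwise points to the dl_se it belongs
         * to (the original one/itself).
         */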
        struct sched_dl_entity *pi_se;
#endif
};

#ifdef CONFIG_UCLAMP_TASK
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
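
/*
 * Utilization clamp for a scheduling entity
 * @value:        clamp value "assigned" to a se
 * @bucket_id:    bucket index corresponding to the "assigned" value
 * @active:       the se is currently refcounted in a rq's bucket
 * @user_defined: the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value
 * assigned, which can be different from the clamp value "requested" from
 * user-space. This allows to know a task is refcounted in the rq's bucket
 * corresponding to the "effective" bucket_id.
 */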
struct uclamp_se {
        unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
        unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
        unsigned int active : 1;
        unsigned int user_defined : 1;
};
#endif

union rcu_special {
        struct {
                u8 blocked;
                u8 need_qs;
                u8 exp_hint;
                u8 need_mb;
        } b;
        u32 s;
};

enum perf_event_task_context {
        perf_invalid_context = -1,
        perf_hw_context = 0,
        perf_sw_context,
        perf_nr_task_contexts,
};

struct wake_q_node {
        struct wake_q_node *next;
};

struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
        int idx;
        pte_t pteval[KM_MAX_IDX];
#endif
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
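        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */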
        struct thread_info thread_info;
#endif
        unsigned int __state;
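
        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
         */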
        randomized_struct_fields_start

        void *stack;
        refcount_t usage;
        unsigned int flags;
        unsigned int ptrace;

#ifdef CONFIG_SMP
        int on_cpu;
        struct __call_single_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        unsigned int cpu;
#endif
        unsigned int wakee_flips;
        unsigned long wakee_flip_decay_ts;
        struct task_struct *last_wakee;
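
        /*
         * recent_used_cpu is initially set as the last CPU used by a task
         * that wakes affine another task. Waker/wakee relationships can
         * effectively be CPU bonds, which secondarily improves cache
         * locality; see select_idle_sibling() for how this hint is used.
         */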
        int recent_used_cpu;
        int wake_cpu;
#endif
        int on_rq;

        int prio;
        int static_prio;
        int normal_prio;
        unsigned int rt_priority;

        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
        struct sched_dl_entity dl;

#ifdef CONFIG_SCHED_CORE
        struct rb_node core_node;
        unsigned long core_cookie;
        unsigned int core_occupation;
#endif

#ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
#endif

#ifdef CONFIG_UCLAMP_TASK
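        /*
         * Clamp values requested for a scheduling entity.
         * Must be updated with task_rq_lock() held.
         */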
        struct uclamp_se uclamp_req[UCLAMP_CNT];
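        /*
         * Effective clamp values used for a scheduling entity.
         * Must be updated with task_rq_lock() held.
         */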
        struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        int nr_cpus_allowed;
        const cpumask_t *cpus_ptr;
        cpumask_t cpus_mask;
        void *migration_pending;
#ifdef CONFIG_SMP
        unsigned short migration_disabled;
#endif
        unsigned short migration_flags;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
        struct rcu_node *rcu_blocked_node;
#endif

#ifdef CONFIG_TASKS_RCU
        unsigned long rcu_tasks_nvcsw;
        u8 rcu_tasks_holdout;
        u8 rcu_tasks_idx;
        int rcu_tasks_idle_cpu;
        struct list_head rcu_tasks_holdout_list;
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
        int trc_reader_nesting;
        int trc_ipi_to_cpu;
        union rcu_special trc_reader_special;
        bool trc_reader_checked;
        struct list_head trc_holdout_list;
#endif

        struct sched_info sched_info;

        struct list_head tasks;
#ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
        struct rb_node pushable_dl_tasks;
#endif

        struct mm_struct *mm;
        struct mm_struct *active_mm;
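
        /* Per-thread vma caching: */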
        struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
        struct task_rss_stat rss_stat;
#endif
        int exit_state;
        int exit_code;
        int exit_signal;

        int pdeath_signal;

        unsigned long jobctl;
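
        /* Used for emulating ABI behavior of previous Linux versions: */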
        unsigned int personality;
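
        /* Scheduler bits, serialized by scheduler locks: */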
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
#ifdef CONFIG_PSI
        unsigned sched_psi_wake_requeue:1;
#endif
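
        /* Force alignment to the next boundary: */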
        unsigned :0;
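
        /*
         * This field must not be in the scheduler word above due to wakelist
         * queueing no longer being serialized by p->on_cpu; the wakeup path
         * guarantees all stores of 'current' are visible before
         * ->sched_remote_wakeup gets used, so it can live in this word.
         */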
        unsigned sched_remote_wakeup:1;
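
        /* Unserialized, strictly 'current' */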
        unsigned in_execve:1;
        unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
        unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
        unsigned no_cgroup_migration:1;
        unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
        unsigned use_memdelay:1;
#endif
#ifdef CONFIG_PSI
        unsigned in_memstall:1;
#endif
#ifdef CONFIG_PAGE_OWNER
        unsigned in_page_owner:1;
#endif

        unsigned long atomic_flags;

        struct restart_block restart_block;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_STACKPROTECTOR
        unsigned long stack_canary;
#endif
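
        /*
         * Pointers to the (original) parent process, youngest child,
         * younger sibling, older sibling, respectively.
         * (p->father can be replaced with p->real_parent->pid)
         *
         * 'real_parent' is the real parent process; 'parent' is the
         * recipient of SIGCHLD and wait4() reports.
         */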
        struct task_struct __rcu *real_parent;

        struct task_struct __rcu *parent;

        struct list_head children;
        struct list_head sibling;
        struct task_struct *group_leader;
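
        /*
         * 'ptraced' is the list of tasks this task is using ptrace() on.
         *
         * This includes both natural children and PTRACE_ATTACH targets.
         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
         */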
        struct list_head ptraced;
        struct list_head ptrace_entry;

        struct pid *thread_pid;
        struct hlist_node pid_links[PIDTYPE_MAX];
        struct list_head thread_group;
        struct list_head thread_node;

        struct completion *vfork_done;

        int __user *set_child_tid;

        int __user *clear_child_tid;

        void *pf_io_worker;

        u64 utime;
        u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64 utimescaled;
        u64 stimescaled;
#endif
        u64 gtime;
        struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        unsigned long nvcsw;
        unsigned long nivcsw;

        u64 start_time;

        u64 start_boottime;

        unsigned long min_flt;
        unsigned long maj_flt;

        struct posix_cputimers posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
        struct posix_cputimers_work posix_cputimers_work;
#endif
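
        /*
         * Process credentials:
         *
         * 'ptracer_cred' is the tracer's credentials at attach;
         * 'real_cred' is the objective and real subjective task
         * credentials (COW); 'cred' is the effective (overridable)
         * subjective task credentials (COW).
         */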
        const struct cred __rcu *ptracer_cred;

        const struct cred __rcu *real_cred;

        const struct cred __rcu *cred;

#ifdef CONFIG_KEYS
        struct key *cached_requested_key;
#endif
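
        /*
         * executable name, excluding path.
         *
         * - normally initialized setup_new_exec()
         * - access it with [gs]et_task_comm()
         * - lock it with task_lock()
         */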
        char comm[TASK_COMM_LEN];

        struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
        struct sysv_sem sysvsem;
        struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
        unsigned long last_switch_count;
        unsigned long last_switch_time;
#endif

        struct fs_struct *fs;

        struct files_struct *files;

#ifdef CONFIG_IO_URING
        struct io_uring_task *io_uring;
#endif

        struct nsproxy *nsproxy;

        struct signal_struct *signal;
        struct sighand_struct __rcu *sighand;
        sigset_t blocked;
        sigset_t real_blocked;

        sigset_t saved_sigmask;
        struct sigpending pending;
        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        unsigned int sas_ss_flags;

        struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
        struct audit_context *audit_context;
#endif
        kuid_t loginuid;
        unsigned int sessionid;
#endif
        struct seccomp seccomp;
        struct syscall_user_dispatch syscall_dispatch;

        u64 parent_exec_id;
        u64 self_exec_id;
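
        /*
         * Protection against (de-)allocation: mm, files, fs, tty, keyrings,
         * mems_allowed, mempolicy:
         */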
        spinlock_t alloc_lock;
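
        /* Protection of the PI data structures: */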
        raw_spinlock_t pi_lock;

        struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
        struct rb_root_cached pi_waiters;

        struct task_struct *pi_top_task;

        struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        int non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        struct irqtrace_events irqtrace;
        unsigned int hardirq_threaded;
        u64 hardirq_chain_key;
        int softirqs_enabled;
        int softirq_context;
        int irq_config;
#endif
#ifdef CONFIG_PREEMPT_RT
        int softirq_disable_cnt;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
        unsigned int in_ubsan;
#endif

        void *journal_info;

        struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
        struct blk_plug *plug;
#endif

        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

#ifdef CONFIG_COMPACTION
        struct capture_control *capture_control;
#endif

        unsigned long ptrace_message;
        kernel_siginfo_t *last_siginfo;

        struct task_io_accounting ioac;
#ifdef CONFIG_PSI
        unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
        u64 acct_rss_mem1;
        u64 acct_vm_mem1;
        u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;
        seqcount_spinlock_t mems_allowed_seq;
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        struct css_set __rcu *cgroups;
        struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
        u32 closid;
        u32 rmid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
        struct mutex futex_exit_mutex;
        unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;
        short il_prev;
        short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
        int numa_scan_seq;
        unsigned int numa_scan_period;
        unsigned int numa_scan_period_max;
        int numa_preferred_nid;
        unsigned long numa_migrate_retry;

        u64 node_stamp;
        u64 last_task_numa_placement;
        u64 last_sum_exec_runtime;
        struct callback_head numa_work;
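
        /*
         * This pointer is only modified for current in syscall and
         * pagefault context (and for tasks being destroyed), so it can be
         * read from any of the following contexts:
         *  - RCU read-side critical section
         *  - current->numa_group from everywhere
         *  - task's runqueue locked, task not running
         */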
        struct numa_group __rcu *numa_group;
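
        /*
         * numa_faults is an array split into four regions:
         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
         * in this precise order.
         *
         * faults_memory: Exponential decaying average of faults on a per-node
         * basis. Scheduling placement decisions are made based on these
         * counts. The values remain static for the duration of a PTE scan.
         * faults_cpu: Track the nodes the process was running on when a NUMA
         * hinting fault was incurred.
         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
         * during the current scan window. When the scan completes, the counts
         * in faults_memory and faults_cpu decay and these values are copied.
         */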
        unsigned long *numa_faults;
        unsigned long total_numa_faults;
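
        /*
         * numa_faults_locality tracks if faults recorded during the last
         * scan window were remote/local or failed to migrate. The task scan
         * period is adapted based on the locality of the faults with
         * different weights depending on whether they were shared or private
         * faults.
         */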
        unsigned long numa_faults_locality[3];

        unsigned long numa_pages_migrated;
#endif

#ifdef CONFIG_RSEQ
        struct rseq __user *rseq;
        u32 rseq_sig;
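        /*
         * RmW on rseq_event_mask must be performed atomically
         * with respect to preemption.
         */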
        unsigned long rseq_event_mask;
#endif

        struct tlbflush_unmap_batch tlb_ubc;

        union {
                refcount_t rcu_users;
                struct rcu_head rcu;
        };

        struct pipe_inode_info *splice_pipe;

        struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
        unsigned int fail_nth;
#endif

        int nr_dirtied;
        int nr_dirtied_pause;

        unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
#endif

        u64 timer_slack_ns;
        u64 default_timer_slack_ns;

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
        unsigned int kasan_depth;
#endif

#ifdef CONFIG_KCSAN
        struct kcsan_ctx kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
        struct irqtrace_events kcsan_save_irqtrace;
#endif
#endif

#if IS_ENABLED(CONFIG_KUNIT)
        struct kunit *kunit_test;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_ret_stack;
        int curr_ret_depth;

        struct ftrace_ret_stack *ret_stack;

        unsigned long long ftrace_timestamp;

        atomic_t trace_overrun;

        atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
        unsigned long trace;

        unsigned long trace_recursion;
#endif

#ifdef CONFIG_KCOV
        unsigned int kcov_mode;

        unsigned int kcov_size;

        void *kcov_area;

        struct kcov *kcov;

        u64 kcov_handle;

        int kcov_sequence;

        unsigned int kcov_softirq;
#endif

#ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg_in_oom;
        gfp_t memcg_oom_gfp_mask;
        int memcg_oom_order;

        unsigned int memcg_nr_pages_over_high;

        struct mem_cgroup *active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
        struct request_queue *throttle_queue;
#endif

#ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
        unsigned int sequential_io;
        unsigned int sequential_io_avg;
#endif
        struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long task_state_change;
#endif
        int pagefault_disabled;
#ifdef CONFIG_MMU
        struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
        int patch_state;
#endif
#ifdef CONFIG_SECURITY
        void *security;
#endif
#ifdef CONFIG_BPF_SYSCALL
        struct bpf_local_storage __rcu *bpf_storage;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        unsigned long lowest_stack;
        unsigned long prev_lowest_stack;
#endif

#ifdef CONFIG_X86_MCE
        void __user *mce_vaddr;
        __u64 mce_kflags;
        u64 mce_addr;
        __u64 mce_ripv : 1,
              mce_whole_page : 1,
              __mce_reserved : 62;
        struct callback_head mce_kill_me;
#endif

#ifdef CONFIG_KRETPROBES
        struct llist_head kretprobe_instances;
#endif
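
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
         */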
        randomized_struct_fields_end
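
        /* CPU-specific state of this task: */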
        struct thread_struct thread;
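
        /*
         * WARNING: on x86, 'thread_struct' contains a variable-sized
         * structure. It *MUST* be at the end of 'task_struct'.
         *
         * Do not put anything below here!
         */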
};

static inline struct pid *task_pid(struct task_struct *task)
{
        return task->thread_pid;
}
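
/*
 * The helpers below get a task's pids as seen from various namespaces:
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace
 *                     of current;
 * task_xid_nr_ns()  : id seen from the ns specified.
 *
 * see also pid_nr() etc in include/linux/pid.h
 */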
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
        return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
        return tsk->tgid;
}
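
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */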
static inline int pid_alive(const struct task_struct *p)
{
        return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
        pid_t pid = 0;

        rcu_read_lock();
        if (pid_alive(tsk))
                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
        rcu_read_unlock();

        return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
        return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
        unsigned int tsk_state = READ_ONCE(tsk->__state);
        unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

        BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

        if (tsk_state == TASK_IDLE)
                state = TASK_REPORT_IDLE;

        return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
        static const char state_char[] = "RSDTtXZPI";

        BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

        return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
        return task_index_to_char(task_state_index(tsk));
}
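
/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */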
static inline int is_global_init(struct task_struct *tsk)
{
        return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;
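
/*
 * Per process flags
 */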
#define PF_VCPU 0x00000001
#define PF_IDLE 0x00000002
#define PF_EXITING 0x00000004
#define PF_IO_WORKER 0x00000010
#define PF_WQ_WORKER 0x00000020
#define PF_FORKNOEXEC 0x00000040
#define PF_MCE_PROCESS 0x00000080
#define PF_SUPERPRIV 0x00000100
#define PF_DUMPCORE 0x00000200
#define PF_SIGNALED 0x00000400
#define PF_MEMALLOC 0x00000800
#define PF_NPROC_EXCEEDED 0x00001000
#define PF_USED_MATH 0x00002000
#define PF_USED_ASYNC 0x00004000
#define PF_NOFREEZE 0x00008000
#define PF_FROZEN 0x00010000
#define PF_KSWAPD 0x00020000
#define PF_MEMALLOC_NOFS 0x00040000
#define PF_MEMALLOC_NOIO 0x00080000
#define PF_LOCAL_THROTTLE 0x00100000

#define PF_KTHREAD 0x00200000
#define PF_RANDOMIZE 0x00400000
#define PF_SWAPWRITE 0x00800000
#define PF_NO_SETAFFINITY 0x04000000
#define PF_MCE_EARLY 0x08000000
#define PF_MEMALLOC_PIN 0x10000000
#define PF_FREEZER_SKIP 0x40000000
#define PF_SUSPEND_TASK 0x80000000
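
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */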
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
               (current->nr_cpus_allowed == 1);
#else
        return true;
#endif
}
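
/* Per-process atomic flags. */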
#define PFA_NO_NEW_PRIVS 0
#define PFA_SPREAD_PAGE 1
#define PFA_SPREAD_SLAB 2
#define PFA_SPEC_SSB_DISABLE 3
#define PFA_SPEC_SSB_FORCE_DISABLE 4
#define PFA_SPEC_IB_DISABLE 5
#define PFA_SPEC_IB_FORCE_DISABLE 6
#define PFA_SPEC_SSB_NOEXEC 7

#define TASK_PFA_TEST(name, func) \
        static inline bool task_##func(struct task_struct *p) \
        { return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
        static inline void task_set_##func(struct task_struct *p) \
        { set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
        static inline void task_clear_##func(struct task_struct *p) \
        { clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
        current->flags &= ~flags;
        current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
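
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */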
static inline int task_nice(const struct task_struct *p)
{
        return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
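
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */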
static __always_inline bool is_idle_task(const struct task_struct *p)
{
        return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
        struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
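
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */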
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
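
/*
 * find a task by its virtual pid and get the task struct
 */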
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({ \
        BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
        __get_task_comm(buf, sizeof(buf), tsk); \
})

#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
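        /*
         * Fold TIF_NEED_RESCHED into the preempt_count such that a
         * remotely-set need_resched is honoured on the next preemption
         * point.
         */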
        preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
        return 1;
}
#endif
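
/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */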
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
                                          bool value)
{
        update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}
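
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */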
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);

#ifdef CONFIG_PREEMPT_DYNAMIC

DECLARE_STATIC_CALL(cond_resched, __cond_resched);

static __always_inline int _cond_resched(void)
{
        return static_call_mod(cond_resched)();
}

#else

static inline int _cond_resched(void)
{
        return __cond_resched();
}

#endif

#else

static inline int _cond_resched(void) { return 0; }

#endif

#define cond_resched() ({ \
        ___might_sleep(__FILE__, __LINE__, 0); \
        _cond_resched(); \
})

extern int __cond_resched_lock(spinlock_t *lock);
extern int __cond_resched_rwlock_read(rwlock_t *lock);
extern int __cond_resched_rwlock_write(rwlock_t *lock);

#define cond_resched_lock(lock) ({ \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
        __cond_resched_lock(lock); \
})

#define cond_resched_rwlock_read(lock) ({ \
        __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
        __cond_resched_rwlock_read(lock); \
})

#define cond_resched_rwlock_write(lock) ({ \
        __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
        __cond_resched_rwlock_write(lock); \
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}
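
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
 * but a general need for low latency)
 */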
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
        return spin_is_contended(lock);
#else
        return 0;
#endif
}
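
/*
 * Check if a rwlock is contended.
 * Returns non-zero if there is another task waiting on the rwlock.
 * Returns zero if the lock is not contended or the system / underlying
 * rwlock implementation does not indicate contention.
 */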
static inline int rwlock_needbreak(rwlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
        return rwlock_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}
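
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */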
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return READ_ONCE(p->cpu);
#else
        return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern bool sched_task_on_rq(struct task_struct *p);
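
/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */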
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
        return false;
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#ifdef CONFIG_SMP
unsigned long sched_cpu_util(int cpu, unsigned long max);
#endif

#ifdef CONFIG_RSEQ
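
/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */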
enum rseq_event_mask_bits {
        RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
        RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
        RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
        RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
        RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
        RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
        if (t->rseq)
                set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
                                             struct pt_regs *regs)
{
        if (current->rseq)
                __rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
                                       struct pt_regs *regs)
{
        preempt_disable();
        __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        preempt_enable();
        rseq_handle_notify_resume(ksig, regs);
}
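
/* rseq_preempt() requires preemption to be disabled. */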
static inline void rseq_preempt(struct task_struct *t)
{
        __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
        rseq_set_notify_resume(t);
}
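
/* rseq_migrate() requires preemption to be disabled. */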
static inline void rseq_migrate(struct task_struct *t)
{
        __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
        rseq_set_notify_resume(t);
}
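
/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Unregister rseq for a clone with CLONE_VM set.
 */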
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
        if (clone_flags & CLONE_VM) {
                t->rseq = NULL;
                t->rseq_sig = 0;
                t->rseq_event_mask = 0;
        } else {
                t->rseq = current->rseq;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
        }
}

static inline void rseq_execve(struct task_struct *t)
{
        t->rseq = NULL;
        t->rseq_sig = 0;
        t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
                                             struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
                                       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);
int sched_trace_rq_cpu_capacity(struct rq *rq);
int sched_trace_rq_nr_running(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
                                unsigned long uaddr);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
#endif

#endif
2205