1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/mutex.h>
18#include <linux/plist.h>
19#include <linux/hrtimer.h>
20#include <linux/irqflags.h>
21#include <linux/seccomp.h>
22#include <linux/nodemask.h>
23#include <linux/rcupdate.h>
24#include <linux/refcount.h>
25#include <linux/resource.h>
26#include <linux/latencytop.h>
27#include <linux/sched/prio.h>
28#include <linux/sched/types.h>
29#include <linux/signal_types.h>
30#include <linux/syscall_user_dispatch.h>
31#include <linux/mm_types_task.h>
32#include <linux/task_io_accounting.h>
33#include <linux/posix-timers.h>
34#include <linux/rseq.h>
35#include <linux/seqlock.h>
36#include <linux/kcsan.h>
37#include <asm/kmap_size.h>
38
39
40struct audit_context;
41struct backing_dev_info;
42struct bio_list;
43struct blk_plug;
44struct bpf_local_storage;
45struct bpf_run_ctx;
46struct capture_control;
47struct cfs_rq;
48struct fs_struct;
49struct futex_pi_state;
50struct io_context;
51struct io_uring_task;
52struct mempolicy;
53struct nameidata;
54struct nsproxy;
55struct perf_event_context;
56struct pid_namespace;
57struct pipe_inode_info;
58struct rcu_node;
59struct reclaim_state;
60struct robust_list_head;
61struct root_domain;
62struct rq;
63struct sched_attr;
64struct sched_param;
65struct seq_file;
66struct sighand_struct;
67struct signal_struct;
68struct task_delay_info;
69struct task_group;
70
71
72
73
74
75
76
77
78
79
80
81
82
83#define TASK_RUNNING 0x0000
84#define TASK_INTERRUPTIBLE 0x0001
85#define TASK_UNINTERRUPTIBLE 0x0002
86#define __TASK_STOPPED 0x0004
87#define __TASK_TRACED 0x0008
88
89#define EXIT_DEAD 0x0010
90#define EXIT_ZOMBIE 0x0020
91#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
92
93#define TASK_PARKED 0x0040
94#define TASK_DEAD 0x0080
95#define TASK_WAKEKILL 0x0100
96#define TASK_WAKING 0x0200
97#define TASK_NOLOAD 0x0400
98#define TASK_NEW 0x0800
99
100#define TASK_RTLOCK_WAIT 0x1000
101#define TASK_STATE_MAX 0x2000
102
103
104#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
105#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
106#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
107
108#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
109
110
111#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
112
113
114#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
115 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
116 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
117 TASK_PARKED)
118
119#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
120
121#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
122
123#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
124
125#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
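/*
 * Note: the task_is_*() helpers above only take a READ_ONCE() snapshot of
 * ->__state; the value may change the moment it has been read, so callers
 * that need a stable answer must provide their own serialization.
 */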
126
127
128
129
130
131#define is_special_task_state(state) \
132 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
133
134#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
135# define debug_normal_state_change(state_value) \
136 do { \
137 WARN_ON_ONCE(is_special_task_state(state_value)); \
138 current->task_state_change = _THIS_IP_; \
139 } while (0)
140
141# define debug_special_state_change(state_value) \
142 do { \
143 WARN_ON_ONCE(!is_special_task_state(state_value)); \
144 current->task_state_change = _THIS_IP_; \
145 } while (0)
146
147# define debug_rtlock_wait_set_state() \
148 do { \
149 current->saved_state_change = current->task_state_change;\
150 current->task_state_change = _THIS_IP_; \
151 } while (0)
152
153# define debug_rtlock_wait_restore_state() \
154 do { \
155 current->task_state_change = current->saved_state_change;\
156 } while (0)
157
158#else
159# define debug_normal_state_change(cond) do { } while (0)
160# define debug_special_state_change(cond) do { } while (0)
161# define debug_rtlock_wait_set_state() do { } while (0)
162# define debug_rtlock_wait_restore_state() do { } while (0)
163#endif
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202#define __set_current_state(state_value) \
203 do { \
204 debug_normal_state_change((state_value)); \
205 WRITE_ONCE(current->__state, (state_value)); \
206 } while (0)
207
208#define set_current_state(state_value) \
209 do { \
210 debug_normal_state_change((state_value)); \
211 smp_store_mb(current->__state, (state_value)); \
212 } while (0)
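/*
 * set_current_state() includes a full memory barrier (smp_store_mb()) so the
 * state write is ordered against the subsequent condition test, while
 * __set_current_state() is a plain WRITE_ONCE(). A typical wait loop looks
 * roughly like:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * where CONDITION is the wake-up condition, set by the waker before it calls
 * wake_up_process()/wake_up_state().
 */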
213
214
215
216
217
218
219
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
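/*
 * set_special_state() is meant for the "special" states (__TASK_STOPPED,
 * __TASK_TRACED, TASK_PARKED, TASK_DEAD): the state write is serialized
 * against ->pi_lock so a concurrent wakeup cannot race with and clobber the
 * transition.
 */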
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);
264
#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);
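/*
 * On PREEMPT_RT the rtlock slowpath brackets the sleep with the two helpers
 * above, roughly like this (names illustrative, not the exact locking code):
 *
 *	current_save_and_set_rtlock_wait_state();
 *	for (;;) {
 *		if (try_to_take_lock(lock))
 *			break;
 *		schedule_rtlock();
 *		set_current_state(TASK_RTLOCK_WAIT);
 *	}
 *	current_restore_rtlock_saved_state();
 *
 * so that the task's original sleep state (e.g. TASK_INTERRUPTIBLE) is
 * preserved across the involuntary rtlock wait.
 */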
274
275#define get_current_state() READ_ONCE(current->__state)
276
277
278#define TASK_COMM_LEN 16
279
280extern void scheduler_tick(void);
281
282#define MAX_SCHEDULE_TIMEOUT LONG_MAX
283
284extern long schedule_timeout(long timeout);
285extern long schedule_timeout_interruptible(long timeout);
286extern long schedule_timeout_killable(long timeout);
287extern long schedule_timeout_uninterruptible(long timeout);
288extern long schedule_timeout_idle(long timeout);
289asmlinkage void schedule(void);
290extern void schedule_preempt_disabled(void);
291asmlinkage void preempt_schedule_irq(void);
292#ifdef CONFIG_PREEMPT_RT
293 extern void schedule_rtlock(void);
294#endif
295
296extern int __must_check io_schedule_prepare(void);
297extern void io_schedule_finish(int token);
298extern long io_schedule_timeout(long timeout);
299extern void io_schedule(void);
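/*
 * Sketch of the split io_schedule() API: callers that must drop locks between
 * marking themselves as waiting on I/O and actually scheduling can do roughly:
 *
 *	int token = io_schedule_prepare();
 *	... release locks, submit the request ...
 *	schedule();		/. or schedule_timeout(...) ./
 *	io_schedule_finish(token);
 *
 * io_schedule() itself is just the prepare + schedule() + finish sequence.
 */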
300
301
302
303
304
305
306
307
308
309
310struct prev_cputime {
311#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
312 u64 utime;
313 u64 stime;
314 raw_spinlock_t lock;
315#endif
316};
317
318enum vtime_state {
319
320 VTIME_INACTIVE = 0,
321
322 VTIME_IDLE,
323
324 VTIME_SYS,
325
326 VTIME_USER,
327
328 VTIME_GUEST,
329};
330
331struct vtime {
332 seqcount_t seqcount;
333 unsigned long long starttime;
334 enum vtime_state state;
335 unsigned int cpu;
336 u64 utime;
337 u64 stime;
338 u64 gtime;
339};
340
341
342
343
344
345
346
347enum uclamp_id {
348 UCLAMP_MIN = 0,
349 UCLAMP_MAX,
350 UCLAMP_CNT
351};
352
353#ifdef CONFIG_SMP
354extern struct root_domain def_root_domain;
355extern struct mutex sched_domains_mutex;
356#endif
357
358struct sched_info {
359#ifdef CONFIG_SCHED_INFO
360
361
362
363 unsigned long pcount;
364
365
366 unsigned long long run_delay;
367
368
369
370
371 unsigned long long last_arrival;
372
373
374 unsigned long long last_queued;
375
376#endif
377};
378
379
380
381
382
383
384
385
386# define SCHED_FIXEDPOINT_SHIFT 10
387# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
388
389
390# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
391# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
392
393struct load_weight {
394 unsigned long weight;
395 u32 inv_weight;
396};
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427struct util_est {
428 unsigned int enqueued;
429 unsigned int ewma;
430#define UTIL_EST_WEIGHT_SHIFT 2
431#define UTIL_AVG_UNCHANGED 0x80000000
432} __attribute__((__aligned__(sizeof(u64))));
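/*
 * util_est is an Exponential Weighted Moving Average of a task's utilization,
 * sampled when the task is dequeued. With UTIL_EST_WEIGHT_SHIFT == 2 a new
 * sample gets a weight of 1/4, roughly:
 *
 *	ewma := (3 * ewma + enqueued) / 4
 *
 * UTIL_AVG_UNCHANGED is a flag bit carried in 'enqueued' marking that util_avg
 * has not changed since the estimate was last updated, so the update can be
 * skipped.
 */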
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479struct sched_avg {
480 u64 last_update_time;
481 u64 load_sum;
482 u64 runnable_sum;
483 u32 util_sum;
484 u32 period_contrib;
485 unsigned long load_avg;
486 unsigned long runnable_avg;
487 unsigned long util_avg;
488 struct util_est util_est;
489} ____cacheline_aligned;
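/*
 * sched_avg carries the PELT (Per-Entity Load Tracking) signals: the *_sum
 * fields are geometric-series accumulators (decaying by half roughly every
 * 32ms) and the *_avg fields are the derived averages consumed by load
 * balancing and frequency selection. The structure is cacheline aligned
 * because it is updated on every enqueue/dequeue and at every tick.
 */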
490
491struct sched_statistics {
492#ifdef CONFIG_SCHEDSTATS
493 u64 wait_start;
494 u64 wait_max;
495 u64 wait_count;
496 u64 wait_sum;
497 u64 iowait_count;
498 u64 iowait_sum;
499
500 u64 sleep_start;
501 u64 sleep_max;
502 s64 sum_sleep_runtime;
503
504 u64 block_start;
505 u64 block_max;
506 u64 exec_max;
507 u64 slice_max;
508
509 u64 nr_migrations_cold;
510 u64 nr_failed_migrations_affine;
511 u64 nr_failed_migrations_running;
512 u64 nr_failed_migrations_hot;
513 u64 nr_forced_migrations;
514
515 u64 nr_wakeups;
516 u64 nr_wakeups_sync;
517 u64 nr_wakeups_migrate;
518 u64 nr_wakeups_local;
519 u64 nr_wakeups_remote;
520 u64 nr_wakeups_affine;
521 u64 nr_wakeups_affine_attempts;
522 u64 nr_wakeups_passive;
523 u64 nr_wakeups_idle;
524#endif
525};
526
527struct sched_entity {
528
529 struct load_weight load;
530 struct rb_node run_node;
531 struct list_head group_node;
532 unsigned int on_rq;
533
534 u64 exec_start;
535 u64 sum_exec_runtime;
536 u64 vruntime;
537 u64 prev_sum_exec_runtime;
538
539 u64 nr_migrations;
540
541 struct sched_statistics statistics;
542
543#ifdef CONFIG_FAIR_GROUP_SCHED
544 int depth;
545 struct sched_entity *parent;
546
547 struct cfs_rq *cfs_rq;
548
549 struct cfs_rq *my_q;
550
551 unsigned long runnable_weight;
552#endif
553
554#ifdef CONFIG_SMP
555
556
557
558
559
560
561 struct sched_avg avg;
562#endif
563};
564
565struct sched_rt_entity {
566 struct list_head run_list;
567 unsigned long timeout;
568 unsigned long watchdog_stamp;
569 unsigned int time_slice;
570 unsigned short on_rq;
571 unsigned short on_list;
572
573 struct sched_rt_entity *back;
574#ifdef CONFIG_RT_GROUP_SCHED
575 struct sched_rt_entity *parent;
576
577 struct rt_rq *rt_rq;
578
579 struct rt_rq *my_q;
580#endif
581} __randomize_layout;
582
583struct sched_dl_entity {
584 struct rb_node rb_node;
585
586
587
588
589
590
591 u64 dl_runtime;
592 u64 dl_deadline;
593 u64 dl_period;
594 u64 dl_bw;
595 u64 dl_density;
596
597
598
599
600
601
602 s64 runtime;
603 u64 deadline;
604 unsigned int flags;
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630 unsigned int dl_throttled : 1;
631 unsigned int dl_yielded : 1;
632 unsigned int dl_non_contending : 1;
633 unsigned int dl_overrun : 1;
634
635
636
637
638
639 struct hrtimer dl_timer;
640
641
642
643
644
645
646
647
648 struct hrtimer inactive_timer;
649
650#ifdef CONFIG_RT_MUTEXES
651
652
653
654
655
656 struct sched_dl_entity *pi_se;
657#endif
658};
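/*
 * The dl_* fields above are the static parameters copied in from
 * sched_setattr(): a SCHED_DEADLINE task asks for dl_runtime units of CPU
 * time every dl_period, with each instance due within dl_deadline. 'runtime'
 * and 'deadline' are the dynamic per-instance values consumed and replenished
 * by the Constant Bandwidth Server, while dl_bw and dl_density cache the
 * runtime/period and runtime/deadline ratios used for admission control.
 */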
659
660#ifdef CONFIG_UCLAMP_TASK
661
662#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687struct uclamp_se {
688 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
689 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
690 unsigned int active : 1;
691 unsigned int user_defined : 1;
692};
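/*
 * A uclamp_se encodes one utilization clamp: 'value' is the requested clamp
 * in [0, SCHED_CAPACITY_SCALE] and 'bucket_id' is the bucket it maps to for
 * per-rq refcounting; 'user_defined' distinguishes values set explicitly via
 * sched_setattr() from defaults inherited from the task group or system.
 */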
693#endif
694
695union rcu_special {
696 struct {
697 u8 blocked;
698 u8 need_qs;
699 u8 exp_hint;
700 u8 need_mb;
701 } b;
702 u32 s;
703};
704
705enum perf_event_task_context {
706 perf_invalid_context = -1,
707 perf_hw_context = 0,
708 perf_sw_context,
709 perf_nr_task_contexts,
710};
711
712struct wake_q_node {
713 struct wake_q_node *next;
714};
715
716struct kmap_ctrl {
717#ifdef CONFIG_KMAP_LOCAL
718 int idx;
719 pte_t pteval[KM_MAX_IDX];
720#endif
721};
722
723struct task_struct {
724#ifdef CONFIG_THREAD_INFO_IN_TASK
725
726
727
728
729 struct thread_info thread_info;
730#endif
731 unsigned int __state;
732
733#ifdef CONFIG_PREEMPT_RT
734
735 unsigned int saved_state;
736#endif
737
738
739
740
741
742 randomized_struct_fields_start
743
744 void *stack;
745 refcount_t usage;
746
747 unsigned int flags;
748 unsigned int ptrace;
749
750#ifdef CONFIG_SMP
751 int on_cpu;
752 struct __call_single_node wake_entry;
753#ifdef CONFIG_THREAD_INFO_IN_TASK
754
755 unsigned int cpu;
756#endif
757 unsigned int wakee_flips;
758 unsigned long wakee_flip_decay_ts;
759 struct task_struct *last_wakee;
760
761
762
763
764
765
766
767
768 int recent_used_cpu;
769 int wake_cpu;
770#endif
771 int on_rq;
772
773 int prio;
774 int static_prio;
775 int normal_prio;
776 unsigned int rt_priority;
777
778 const struct sched_class *sched_class;
779 struct sched_entity se;
780 struct sched_rt_entity rt;
781 struct sched_dl_entity dl;
782
783#ifdef CONFIG_SCHED_CORE
784 struct rb_node core_node;
785 unsigned long core_cookie;
786 unsigned int core_occupation;
787#endif
788
789#ifdef CONFIG_CGROUP_SCHED
790 struct task_group *sched_task_group;
791#endif
792
793#ifdef CONFIG_UCLAMP_TASK
794
795
796
797
798 struct uclamp_se uclamp_req[UCLAMP_CNT];
799
800
801
802
803 struct uclamp_se uclamp[UCLAMP_CNT];
804#endif
805
806#ifdef CONFIG_PREEMPT_NOTIFIERS
807
808 struct hlist_head preempt_notifiers;
809#endif
810
811#ifdef CONFIG_BLK_DEV_IO_TRACE
812 unsigned int btrace_seq;
813#endif
814
815 unsigned int policy;
816 int nr_cpus_allowed;
817 const cpumask_t *cpus_ptr;
818 cpumask_t *user_cpus_ptr;
819 cpumask_t cpus_mask;
820 void *migration_pending;
821#ifdef CONFIG_SMP
822 unsigned short migration_disabled;
823#endif
824 unsigned short migration_flags;
825
826#ifdef CONFIG_PREEMPT_RCU
827 int rcu_read_lock_nesting;
828 union rcu_special rcu_read_unlock_special;
829 struct list_head rcu_node_entry;
830 struct rcu_node *rcu_blocked_node;
831#endif
832
833#ifdef CONFIG_TASKS_RCU
834 unsigned long rcu_tasks_nvcsw;
835 u8 rcu_tasks_holdout;
836 u8 rcu_tasks_idx;
837 int rcu_tasks_idle_cpu;
838 struct list_head rcu_tasks_holdout_list;
839#endif
840
841#ifdef CONFIG_TASKS_TRACE_RCU
842 int trc_reader_nesting;
843 int trc_ipi_to_cpu;
844 union rcu_special trc_reader_special;
845 bool trc_reader_checked;
846 struct list_head trc_holdout_list;
847#endif
848
849 struct sched_info sched_info;
850
851 struct list_head tasks;
852#ifdef CONFIG_SMP
853 struct plist_node pushable_tasks;
854 struct rb_node pushable_dl_tasks;
855#endif
856
857 struct mm_struct *mm;
858 struct mm_struct *active_mm;
859
860
861 struct vmacache vmacache;
862
863#ifdef SPLIT_RSS_COUNTING
864 struct task_rss_stat rss_stat;
865#endif
866 int exit_state;
867 int exit_code;
868 int exit_signal;
869
870 int pdeath_signal;
871
872 unsigned long jobctl;
873
874
875 unsigned int personality;
876
877
878 unsigned sched_reset_on_fork:1;
879 unsigned sched_contributes_to_load:1;
880 unsigned sched_migrated:1;
881#ifdef CONFIG_PSI
882 unsigned sched_psi_wake_requeue:1;
883#endif
884
885
886 unsigned :0;
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903 unsigned sched_remote_wakeup:1;
904
905
906 unsigned in_execve:1;
907 unsigned in_iowait:1;
908#ifndef TIF_RESTORE_SIGMASK
909 unsigned restore_sigmask:1;
910#endif
911#ifdef CONFIG_MEMCG
912 unsigned in_user_fault:1;
913#endif
914#ifdef CONFIG_COMPAT_BRK
915 unsigned brk_randomized:1;
916#endif
917#ifdef CONFIG_CGROUPS
918
919 unsigned no_cgroup_migration:1;
920
921 unsigned frozen:1;
922#endif
923#ifdef CONFIG_BLK_CGROUP
924 unsigned use_memdelay:1;
925#endif
926#ifdef CONFIG_PSI
927
928 unsigned in_memstall:1;
929#endif
930#ifdef CONFIG_PAGE_OWNER
931
932 unsigned in_page_owner:1;
933#endif
934#ifdef CONFIG_EVENTFD
935
936 unsigned in_eventfd_signal:1;
937#endif
938
939 unsigned long atomic_flags;
940
941 struct restart_block restart_block;
942
943 pid_t pid;
944 pid_t tgid;
945
946#ifdef CONFIG_STACKPROTECTOR
947
948 unsigned long stack_canary;
949#endif
950
951
952
953
954
955
956
957 struct task_struct __rcu *real_parent;
958
959
960 struct task_struct __rcu *parent;
961
962
963
964
965 struct list_head children;
966 struct list_head sibling;
967 struct task_struct *group_leader;
968
969
970
971
972
973
974
975 struct list_head ptraced;
976 struct list_head ptrace_entry;
977
978
979 struct pid *thread_pid;
980 struct hlist_node pid_links[PIDTYPE_MAX];
981 struct list_head thread_group;
982 struct list_head thread_node;
983
984 struct completion *vfork_done;
985
986
987 int __user *set_child_tid;
988
989
990 int __user *clear_child_tid;
991
992
993 void *pf_io_worker;
994
995 u64 utime;
996 u64 stime;
997#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
998 u64 utimescaled;
999 u64 stimescaled;
1000#endif
1001 u64 gtime;
1002 struct prev_cputime prev_cputime;
1003#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1004 struct vtime vtime;
1005#endif
1006
1007#ifdef CONFIG_NO_HZ_FULL
1008 atomic_t tick_dep_mask;
1009#endif
1010
1011 unsigned long nvcsw;
1012 unsigned long nivcsw;
1013
1014
1015 u64 start_time;
1016
1017
1018 u64 start_boottime;
1019
1020
1021 unsigned long min_flt;
1022 unsigned long maj_flt;
1023
1024
1025 struct posix_cputimers posix_cputimers;
1026
1027#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1028 struct posix_cputimers_work posix_cputimers_work;
1029#endif
1030
1031
1032
1033
1034 const struct cred __rcu *ptracer_cred;
1035
1036
1037 const struct cred __rcu *real_cred;
1038
1039
1040 const struct cred __rcu *cred;
1041
1042#ifdef CONFIG_KEYS
1043
1044 struct key *cached_requested_key;
1045#endif
1046
1047
1048
1049
1050
1051
1052
1053
1054 char comm[TASK_COMM_LEN];
1055
1056 struct nameidata *nameidata;
1057
1058#ifdef CONFIG_SYSVIPC
1059 struct sysv_sem sysvsem;
1060 struct sysv_shm sysvshm;
1061#endif
1062#ifdef CONFIG_DETECT_HUNG_TASK
1063 unsigned long last_switch_count;
1064 unsigned long last_switch_time;
1065#endif
1066
1067 struct fs_struct *fs;
1068
1069
1070 struct files_struct *files;
1071
1072#ifdef CONFIG_IO_URING
1073 struct io_uring_task *io_uring;
1074#endif
1075
1076
1077 struct nsproxy *nsproxy;
1078
1079
1080 struct signal_struct *signal;
1081 struct sighand_struct __rcu *sighand;
1082 sigset_t blocked;
1083 sigset_t real_blocked;
1084
1085 sigset_t saved_sigmask;
1086 struct sigpending pending;
1087 unsigned long sas_ss_sp;
1088 size_t sas_ss_size;
1089 unsigned int sas_ss_flags;
1090
1091 struct callback_head *task_works;
1092
1093#ifdef CONFIG_AUDIT
1094#ifdef CONFIG_AUDITSYSCALL
1095 struct audit_context *audit_context;
1096#endif
1097 kuid_t loginuid;
1098 unsigned int sessionid;
1099#endif
1100 struct seccomp seccomp;
1101 struct syscall_user_dispatch syscall_dispatch;
1102
1103
1104 u64 parent_exec_id;
1105 u64 self_exec_id;
1106
1107
1108 spinlock_t alloc_lock;
1109
1110
1111 raw_spinlock_t pi_lock;
1112
1113 struct wake_q_node wake_q;
1114
1115#ifdef CONFIG_RT_MUTEXES
1116
1117 struct rb_root_cached pi_waiters;
1118
1119 struct task_struct *pi_top_task;
1120
1121 struct rt_mutex_waiter *pi_blocked_on;
1122#endif
1123
1124#ifdef CONFIG_DEBUG_MUTEXES
1125
1126 struct mutex_waiter *blocked_on;
1127#endif
1128
1129#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1130 int non_block_count;
1131#endif
1132
1133#ifdef CONFIG_TRACE_IRQFLAGS
1134 struct irqtrace_events irqtrace;
1135 unsigned int hardirq_threaded;
1136 u64 hardirq_chain_key;
1137 int softirqs_enabled;
1138 int softirq_context;
1139 int irq_config;
1140#endif
1141#ifdef CONFIG_PREEMPT_RT
1142 int softirq_disable_cnt;
1143#endif
1144
1145#ifdef CONFIG_LOCKDEP
1146# define MAX_LOCK_DEPTH 48UL
1147 u64 curr_chain_key;
1148 int lockdep_depth;
1149 unsigned int lockdep_recursion;
1150 struct held_lock held_locks[MAX_LOCK_DEPTH];
1151#endif
1152
1153#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1154 unsigned int in_ubsan;
1155#endif
1156
1157
1158 void *journal_info;
1159
1160
1161 struct bio_list *bio_list;
1162
1163#ifdef CONFIG_BLOCK
1164
1165 struct blk_plug *plug;
1166#endif
1167
1168
1169 struct reclaim_state *reclaim_state;
1170
1171 struct backing_dev_info *backing_dev_info;
1172
1173 struct io_context *io_context;
1174
1175#ifdef CONFIG_COMPACTION
1176 struct capture_control *capture_control;
1177#endif
1178
1179 unsigned long ptrace_message;
1180 kernel_siginfo_t *last_siginfo;
1181
1182 struct task_io_accounting ioac;
1183#ifdef CONFIG_PSI
1184
1185 unsigned int psi_flags;
1186#endif
1187#ifdef CONFIG_TASK_XACCT
1188
1189 u64 acct_rss_mem1;
1190
1191 u64 acct_vm_mem1;
1192
1193 u64 acct_timexpd;
1194#endif
1195#ifdef CONFIG_CPUSETS
1196
1197 nodemask_t mems_allowed;
1198
1199 seqcount_spinlock_t mems_allowed_seq;
1200 int cpuset_mem_spread_rotor;
1201 int cpuset_slab_spread_rotor;
1202#endif
1203#ifdef CONFIG_CGROUPS
1204
1205 struct css_set __rcu *cgroups;
1206
1207 struct list_head cg_list;
1208#endif
1209#ifdef CONFIG_X86_CPU_RESCTRL
1210 u32 closid;
1211 u32 rmid;
1212#endif
1213#ifdef CONFIG_FUTEX
1214 struct robust_list_head __user *robust_list;
1215#ifdef CONFIG_COMPAT
1216 struct compat_robust_list_head __user *compat_robust_list;
1217#endif
1218 struct list_head pi_state_list;
1219 struct futex_pi_state *pi_state_cache;
1220 struct mutex futex_exit_mutex;
1221 unsigned int futex_state;
1222#endif
1223#ifdef CONFIG_PERF_EVENTS
1224 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1225 struct mutex perf_event_mutex;
1226 struct list_head perf_event_list;
1227#endif
1228#ifdef CONFIG_DEBUG_PREEMPT
1229 unsigned long preempt_disable_ip;
1230#endif
1231#ifdef CONFIG_NUMA
1232
1233 struct mempolicy *mempolicy;
1234 short il_prev;
1235 short pref_node_fork;
1236#endif
1237#ifdef CONFIG_NUMA_BALANCING
1238 int numa_scan_seq;
1239 unsigned int numa_scan_period;
1240 unsigned int numa_scan_period_max;
1241 int numa_preferred_nid;
1242 unsigned long numa_migrate_retry;
1243
1244 u64 node_stamp;
1245 u64 last_task_numa_placement;
1246 u64 last_sum_exec_runtime;
1247 struct callback_head numa_work;
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257 struct numa_group __rcu *numa_group;
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273 unsigned long *numa_faults;
1274 unsigned long total_numa_faults;
1275
1276
1277
1278
1279
1280
1281
1282 unsigned long numa_faults_locality[3];
1283
1284 unsigned long numa_pages_migrated;
1285#endif
1286
1287#ifdef CONFIG_RSEQ
1288 struct rseq __user *rseq;
1289 u32 rseq_sig;
1290
1291
1292
1293
1294 unsigned long rseq_event_mask;
1295#endif
1296
1297 struct tlbflush_unmap_batch tlb_ubc;
1298
1299 union {
1300 refcount_t rcu_users;
1301 struct rcu_head rcu;
1302 };
1303
1304
1305 struct pipe_inode_info *splice_pipe;
1306
1307 struct page_frag task_frag;
1308
1309#ifdef CONFIG_TASK_DELAY_ACCT
1310 struct task_delay_info *delays;
1311#endif
1312
1313#ifdef CONFIG_FAULT_INJECTION
1314 int make_it_fail;
1315 unsigned int fail_nth;
1316#endif
1317
1318
1319
1320
1321 int nr_dirtied;
1322 int nr_dirtied_pause;
1323
1324 unsigned long dirty_paused_when;
1325
1326#ifdef CONFIG_LATENCYTOP
1327 int latency_record_count;
1328 struct latency_record latency_record[LT_SAVECOUNT];
1329#endif
1330
1331
1332
1333
1334 u64 timer_slack_ns;
1335 u64 default_timer_slack_ns;
1336
1337#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1338 unsigned int kasan_depth;
1339#endif
1340
1341#ifdef CONFIG_KCSAN
1342 struct kcsan_ctx kcsan_ctx;
1343#ifdef CONFIG_TRACE_IRQFLAGS
1344 struct irqtrace_events kcsan_save_irqtrace;
1345#endif
1346#endif
1347
1348#if IS_ENABLED(CONFIG_KUNIT)
1349 struct kunit *kunit_test;
1350#endif
1351
1352#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1353
1354 int curr_ret_stack;
1355 int curr_ret_depth;
1356
1357
1358 struct ftrace_ret_stack *ret_stack;
1359
1360
1361 unsigned long long ftrace_timestamp;
1362
1363
1364
1365
1366
1367 atomic_t trace_overrun;
1368
1369
1370 atomic_t tracing_graph_pause;
1371#endif
1372
1373#ifdef CONFIG_TRACING
1374
1375 unsigned long trace;
1376
1377
1378 unsigned long trace_recursion;
1379#endif
1380
1381#ifdef CONFIG_KCOV
1382
1383
1384
1385 unsigned int kcov_mode;
1386
1387
1388 unsigned int kcov_size;
1389
1390
1391 void *kcov_area;
1392
1393
1394 struct kcov *kcov;
1395
1396
1397 u64 kcov_handle;
1398
1399
1400 int kcov_sequence;
1401
1402
1403 unsigned int kcov_softirq;
1404#endif
1405
1406#ifdef CONFIG_MEMCG
1407 struct mem_cgroup *memcg_in_oom;
1408 gfp_t memcg_oom_gfp_mask;
1409 int memcg_oom_order;
1410
1411
1412 unsigned int memcg_nr_pages_over_high;
1413
1414
1415 struct mem_cgroup *active_memcg;
1416#endif
1417
1418#ifdef CONFIG_BLK_CGROUP
1419 struct request_queue *throttle_queue;
1420#endif
1421
1422#ifdef CONFIG_UPROBES
1423 struct uprobe_task *utask;
1424#endif
1425#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1426 unsigned int sequential_io;
1427 unsigned int sequential_io_avg;
1428#endif
1429 struct kmap_ctrl kmap_ctrl;
1430#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1431 unsigned long task_state_change;
1432# ifdef CONFIG_PREEMPT_RT
1433 unsigned long saved_state_change;
1434# endif
1435#endif
1436 int pagefault_disabled;
1437#ifdef CONFIG_MMU
1438 struct task_struct *oom_reaper_list;
1439#endif
1440#ifdef CONFIG_VMAP_STACK
1441 struct vm_struct *stack_vm_area;
1442#endif
1443#ifdef CONFIG_THREAD_INFO_IN_TASK
1444
1445 refcount_t stack_refcount;
1446#endif
1447#ifdef CONFIG_LIVEPATCH
1448 int patch_state;
1449#endif
1450#ifdef CONFIG_SECURITY
1451
1452 void *security;
1453#endif
1454#ifdef CONFIG_BPF_SYSCALL
1455
1456 struct bpf_local_storage __rcu *bpf_storage;
1457
1458 struct bpf_run_ctx *bpf_ctx;
1459#endif
1460
1461#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1462 unsigned long lowest_stack;
1463 unsigned long prev_lowest_stack;
1464#endif
1465
1466#ifdef CONFIG_X86_MCE
1467 void __user *mce_vaddr;
1468 __u64 mce_kflags;
1469 u64 mce_addr;
1470 __u64 mce_ripv : 1,
1471 mce_whole_page : 1,
1472 __mce_reserved : 62;
1473 struct callback_head mce_kill_me;
1474 int mce_count;
1475#endif
1476
1477#ifdef CONFIG_KRETPROBES
1478 struct llist_head kretprobe_instances;
1479#endif
1480
1481#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1482
1483
1484
1485
1486
1487
1488 struct callback_head l1d_flush_kill;
1489#endif
1490
1491
1492
1493
1494
1495 randomized_struct_fields_end
1496
1497
1498 struct thread_struct thread;
1499
1500
1501
1502
1503
1504
1505
1506};
1507
1508static inline struct pid *task_pid(struct task_struct *task)
1509{
1510 return task->thread_pid;
1511}
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1525
1526static inline pid_t task_pid_nr(struct task_struct *tsk)
1527{
1528 return tsk->pid;
1529}
1530
1531static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1532{
1533 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1534}
1535
1536static inline pid_t task_pid_vnr(struct task_struct *tsk)
1537{
1538 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1539}
1540
1541
1542static inline pid_t task_tgid_nr(struct task_struct *tsk)
1543{
1544 return tsk->tgid;
1545}
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557static inline int pid_alive(const struct task_struct *p)
1558{
1559 return p->thread_pid != NULL;
1560}
1561
1562static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1563{
1564 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1565}
1566
1567static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1568{
1569 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1570}
1571
1572
1573static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1574{
1575 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1576}
1577
1578static inline pid_t task_session_vnr(struct task_struct *tsk)
1579{
1580 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1581}
1582
1583static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1584{
1585 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1586}
1587
1588static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1589{
1590 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1591}
1592
1593static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1594{
1595 pid_t pid = 0;
1596
1597 rcu_read_lock();
1598 if (pid_alive(tsk))
1599 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1600 rcu_read_unlock();
1601
1602 return pid;
1603}
1604
1605static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1606{
1607 return task_ppid_nr_ns(tsk, &init_pid_ns);
1608}
1609
1610
1611static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1612{
1613 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1614}
1615
1616#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1617#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1618
1619static inline unsigned int task_state_index(struct task_struct *tsk)
1620{
1621 unsigned int tsk_state = READ_ONCE(tsk->__state);
1622 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1623
1624 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1625
1626 if (tsk_state == TASK_IDLE)
1627 state = TASK_REPORT_IDLE;
1628
1629 return fls(state);
1630}
1631
1632static inline char task_index_to_char(unsigned int state)
1633{
1634 static const char state_char[] = "RSDTtXZPI";
1635
1636 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1637
1638 return state_char[state];
1639}
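/*
 * The index returned by task_state_index() selects a letter from "RSDTtXZPI":
 *
 *	R  TASK_RUNNING		S  TASK_INTERRUPTIBLE
 *	D  TASK_UNINTERRUPTIBLE	T  __TASK_STOPPED
 *	t  __TASK_TRACED	X  EXIT_DEAD
 *	Z  EXIT_ZOMBIE		P  TASK_PARKED
 *	I  TASK_IDLE
 *
 * which is the single-letter state reported in /proc/<pid>/stat and by ps(1).
 */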
1640
1641static inline char task_state_to_char(struct task_struct *tsk)
1642{
1643 return task_index_to_char(task_state_index(tsk));
1644}
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655static inline int is_global_init(struct task_struct *tsk)
1656{
1657 return task_tgid_nr(tsk) == 1;
1658}
1659
1660extern struct pid *cad_pid;
1661
1662
1663
1664
1665#define PF_VCPU 0x00000001
1666#define PF_IDLE 0x00000002
1667#define PF_EXITING 0x00000004
1668#define PF_IO_WORKER 0x00000010
1669#define PF_WQ_WORKER 0x00000020
1670#define PF_FORKNOEXEC 0x00000040
1671#define PF_MCE_PROCESS 0x00000080
1672#define PF_SUPERPRIV 0x00000100
1673#define PF_DUMPCORE 0x00000200
1674#define PF_SIGNALED 0x00000400
1675#define PF_MEMALLOC 0x00000800
1676#define PF_NPROC_EXCEEDED 0x00001000
1677#define PF_USED_MATH 0x00002000
1678#define PF_USED_ASYNC 0x00004000
1679#define PF_NOFREEZE 0x00008000
1680#define PF_FROZEN 0x00010000
1681#define PF_KSWAPD 0x00020000
1682#define PF_MEMALLOC_NOFS 0x00040000
1683#define PF_MEMALLOC_NOIO 0x00080000
1684#define PF_LOCAL_THROTTLE 0x00100000
1685
1686#define PF_KTHREAD 0x00200000
1687#define PF_RANDOMIZE 0x00400000
1688#define PF_SWAPWRITE 0x00800000
1689#define PF_NO_SETAFFINITY 0x04000000
1690#define PF_MCE_EARLY 0x08000000
1691#define PF_MEMALLOC_PIN 0x10000000
1692#define PF_FREEZER_SKIP 0x40000000
1693#define PF_SUSPEND_TASK 0x80000000
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1707#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1708#define clear_used_math() clear_stopped_child_used_math(current)
1709#define set_used_math() set_stopped_child_used_math(current)
1710
1711#define conditional_stopped_child_used_math(condition, child) \
1712 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1713
1714#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1715
1716#define copy_to_stopped_child_used_math(child) \
1717 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1718
1719
1720#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1721#define used_math() tsk_used_math(current)
1722
1723static __always_inline bool is_percpu_thread(void)
1724{
1725#ifdef CONFIG_SMP
1726 return (current->flags & PF_NO_SETAFFINITY) &&
1727 (current->nr_cpus_allowed == 1);
1728#else
1729 return true;
1730#endif
1731}
1732
1733
1734#define PFA_NO_NEW_PRIVS 0
1735#define PFA_SPREAD_PAGE 1
1736#define PFA_SPREAD_SLAB 2
1737#define PFA_SPEC_SSB_DISABLE 3
1738#define PFA_SPEC_SSB_FORCE_DISABLE 4
1739#define PFA_SPEC_IB_DISABLE 5
1740#define PFA_SPEC_IB_FORCE_DISABLE 6
1741#define PFA_SPEC_SSB_NOEXEC 7
1742
1743#define TASK_PFA_TEST(name, func) \
1744 static inline bool task_##func(struct task_struct *p) \
1745 { return test_bit(PFA_##name, &p->atomic_flags); }
1746
1747#define TASK_PFA_SET(name, func) \
1748 static inline void task_set_##func(struct task_struct *p) \
1749 { set_bit(PFA_##name, &p->atomic_flags); }
1750
1751#define TASK_PFA_CLEAR(name, func) \
1752 static inline void task_clear_##func(struct task_struct *p) \
1753 { clear_bit(PFA_##name, &p->atomic_flags); }
1754
1755TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1756TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1757
1758TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1759TASK_PFA_SET(SPREAD_PAGE, spread_page)
1760TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1761
1762TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1763TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1764TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1765
1766TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1767TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1768TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1769
1770TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1771TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1772TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1773
1774TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1775TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1776
1777TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1778TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1779TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1780
1781TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1782TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1783
1784static inline void
1785current_restore_flags(unsigned long orig_flags, unsigned long flags)
1786{
1787 current->flags &= ~flags;
1788 current->flags |= orig_flags & flags;
1789}
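/*
 * current_restore_flags() pairs with a save of the same mask, e.g.
 * (illustrative sketch):
 *
 *	unsigned long pflags = current->flags & PF_MEMALLOC_NOIO;
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	... allocate without recursing into I/O ...
 *	current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 *
 * Only the bits named in the mask are restored; all other flags are left
 * untouched.
 */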
1790
1791extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1792extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1793#ifdef CONFIG_SMP
1794extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1795extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1796extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1797extern void release_user_cpus_ptr(struct task_struct *p);
1798extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1799extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1800extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1801#else
1802static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1803{
1804}
1805static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1806{
1807 if (!cpumask_test_cpu(0, new_mask))
1808 return -EINVAL;
1809 return 0;
1810}
1811static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1812{
1813 if (src->user_cpus_ptr)
1814 return -EINVAL;
1815 return 0;
1816}
1817static inline void release_user_cpus_ptr(struct task_struct *p)
1818{
1819 WARN_ON(p->user_cpus_ptr);
1820}
1821
1822static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1823{
1824 return 0;
1825}
1826#endif
1827
1828extern int yield_to(struct task_struct *p, bool preempt);
1829extern void set_user_nice(struct task_struct *p, long nice);
1830extern int task_prio(const struct task_struct *p);
1831
1832
1833
1834
1835
1836
1837
1838static inline int task_nice(const struct task_struct *p)
1839{
1840 return PRIO_TO_NICE((p)->static_prio);
1841}
1842
1843extern int can_nice(const struct task_struct *p, const int nice);
1844extern int task_curr(const struct task_struct *p);
1845extern int idle_cpu(int cpu);
1846extern int available_idle_cpu(int cpu);
1847extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1848extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1849extern void sched_set_fifo(struct task_struct *p);
1850extern void sched_set_fifo_low(struct task_struct *p);
1851extern void sched_set_normal(struct task_struct *p, int nice);
1852extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1853extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1854extern struct task_struct *idle_task(int cpu);
1855
1856
1857
1858
1859
1860
1861
1862static __always_inline bool is_idle_task(const struct task_struct *p)
1863{
1864 return !!(p->flags & PF_IDLE);
1865}
1866
1867extern struct task_struct *curr_task(int cpu);
1868extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1869
1870void yield(void);
1871
1872union thread_union {
1873#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1874 struct task_struct task;
1875#endif
1876#ifndef CONFIG_THREAD_INFO_IN_TASK
1877 struct thread_info thread_info;
1878#endif
1879 unsigned long stack[THREAD_SIZE/sizeof(long)];
1880};
1881
1882#ifndef CONFIG_THREAD_INFO_IN_TASK
1883extern struct thread_info init_thread_info;
1884#endif
1885
1886extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1887
1888#ifdef CONFIG_THREAD_INFO_IN_TASK
1889static inline struct thread_info *task_thread_info(struct task_struct *task)
1890{
1891 return &task->thread_info;
1892}
1893#elif !defined(__HAVE_THREAD_FUNCTIONS)
1894# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1895#endif
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908extern struct task_struct *find_task_by_vpid(pid_t nr);
1909extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1910
1911
1912
1913
1914extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1915
1916extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1917extern int wake_up_process(struct task_struct *tsk);
1918extern void wake_up_new_task(struct task_struct *tsk);
1919
1920#ifdef CONFIG_SMP
1921extern void kick_process(struct task_struct *tsk);
1922#else
1923static inline void kick_process(struct task_struct *tsk) { }
1924#endif
1925
1926extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1927
1928static inline void set_task_comm(struct task_struct *tsk, const char *from)
1929{
1930 __set_task_comm(tsk, from, false);
1931}
1932
1933extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1934#define get_task_comm(buf, tsk) ({ \
1935 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1936 __get_task_comm(buf, sizeof(buf), tsk); \
1937})
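/*
 * get_task_comm() requires 'buf' to be a real char array of TASK_COMM_LEN
 * bytes (the BUILD_BUG_ON rejects pointers), e.g.:
 *
 *	char comm[TASK_COMM_LEN];
 *	get_task_comm(comm, tsk);
 */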
1938
1939#ifdef CONFIG_SMP
1940static __always_inline void scheduler_ipi(void)
1941{
1942
1943
1944
1945
1946
1947 preempt_fold_need_resched();
1948}
1949extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1950#else
1951static inline void scheduler_ipi(void) { }
1952static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1953{
1954 return 1;
1955}
1956#endif
1957
1958
1959
1960
1961
1962static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1963{
1964 set_ti_thread_flag(task_thread_info(tsk), flag);
1965}
1966
1967static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1968{
1969 clear_ti_thread_flag(task_thread_info(tsk), flag);
1970}
1971
1972static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1973 bool value)
1974{
1975 update_ti_thread_flag(task_thread_info(tsk), flag, value);
1976}
1977
1978static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1979{
1980 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1981}
1982
1983static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1984{
1985 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1986}
1987
1988static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1989{
1990 return test_ti_thread_flag(task_thread_info(tsk), flag);
1991}
1992
1993static inline void set_tsk_need_resched(struct task_struct *tsk)
1994{
1995 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1996}
1997
1998static inline void clear_tsk_need_resched(struct task_struct *tsk)
1999{
2000 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2001}
2002
2003static inline int test_tsk_need_resched(struct task_struct *tsk)
2004{
2005 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2006}
2007
2008
2009
2010
2011
2012
2013
2014#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2015extern int __cond_resched(void);
2016
2017#ifdef CONFIG_PREEMPT_DYNAMIC
2018
2019DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2020
2021static __always_inline int _cond_resched(void)
2022{
2023 return static_call_mod(cond_resched)();
2024}
2025
2026#else
2027
2028static inline int _cond_resched(void)
2029{
2030 return __cond_resched();
2031}
2032
2033#endif
2034
2035#else
2036
2037static inline int _cond_resched(void) { return 0; }
2038
2039#endif
2040
2041#define cond_resched() ({ \
2042 ___might_sleep(__FILE__, __LINE__, 0); \
2043 _cond_resched(); \
2044})
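/*
 * cond_resched() is the voluntary preemption point: sprinkle it into long
 * kernel loops so other tasks get to run on non-preemptible kernels, e.g.:
 *
 *	list_for_each_entry(obj, &list, node) {
 *		process(obj);
 *		cond_resched();
 *	}
 *
 * It may only be used where sleeping is legal, which is what the
 * ___might_sleep() check enforces.
 */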
2045
2046extern int __cond_resched_lock(spinlock_t *lock);
2047extern int __cond_resched_rwlock_read(rwlock_t *lock);
2048extern int __cond_resched_rwlock_write(rwlock_t *lock);
2049
2050#define cond_resched_lock(lock) ({ \
2051 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2052 __cond_resched_lock(lock); \
2053})
2054
2055#define cond_resched_rwlock_read(lock) ({ \
2056 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2057 __cond_resched_rwlock_read(lock); \
2058})
2059
2060#define cond_resched_rwlock_write(lock) ({ \
2061 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2062 __cond_resched_rwlock_write(lock); \
2063})
2064
2065static inline void cond_resched_rcu(void)
2066{
2067#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2068 rcu_read_unlock();
2069 cond_resched();
2070 rcu_read_lock();
2071#endif
2072}
2073
2074
2075
2076
2077
2078
2079static inline int spin_needbreak(spinlock_t *lock)
2080{
2081#ifdef CONFIG_PREEMPTION
2082 return spin_is_contended(lock);
2083#else
2084 return 0;
2085#endif
2086}
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096static inline int rwlock_needbreak(rwlock_t *lock)
2097{
2098#ifdef CONFIG_PREEMPTION
2099 return rwlock_is_contended(lock);
2100#else
2101 return 0;
2102#endif
2103}
2104
2105static __always_inline bool need_resched(void)
2106{
2107 return unlikely(tif_need_resched());
2108}
2109
2110
2111
2112
2113#ifdef CONFIG_SMP
2114
2115static inline unsigned int task_cpu(const struct task_struct *p)
2116{
2117#ifdef CONFIG_THREAD_INFO_IN_TASK
2118 return READ_ONCE(p->cpu);
2119#else
2120 return READ_ONCE(task_thread_info(p)->cpu);
2121#endif
2122}
2123
2124extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2125
2126#else
2127
2128static inline unsigned int task_cpu(const struct task_struct *p)
2129{
2130 return 0;
2131}
2132
2133static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2134{
2135}
2136
2137#endif
2138
2139extern bool sched_task_on_rq(struct task_struct *p);
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149#ifndef vcpu_is_preempted
2150static inline bool vcpu_is_preempted(int cpu)
2151{
2152 return false;
2153}
2154#endif
2155
2156extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2157extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2158
2159#ifndef TASK_SIZE_OF
2160#define TASK_SIZE_OF(tsk) TASK_SIZE
2161#endif
2162
2163#ifdef CONFIG_SMP
2164
2165unsigned long sched_cpu_util(int cpu, unsigned long max);
2166#endif
2167
2168#ifdef CONFIG_RSEQ
2169
2170
2171
2172
2173
2174enum rseq_event_mask_bits {
2175 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2176 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2177 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2178};
2179
2180enum rseq_event_mask {
2181 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
2182 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
2183 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
2184};
2185
2186static inline void rseq_set_notify_resume(struct task_struct *t)
2187{
2188 if (t->rseq)
2189 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2190}
2191
2192void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2193
2194static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2195 struct pt_regs *regs)
2196{
2197 if (current->rseq)
2198 __rseq_handle_notify_resume(ksig, regs);
2199}
2200
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}
2209
2210
2211static inline void rseq_preempt(struct task_struct *t)
2212{
2213 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2214 rseq_set_notify_resume(t);
2215}
2216
2217
2218static inline void rseq_migrate(struct task_struct *t)
2219{
2220 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2221 rseq_set_notify_resume(t);
2222}
2223
2224
2225
2226
2227
2228static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2229{
2230 if (clone_flags & CLONE_VM) {
2231 t->rseq = NULL;
2232 t->rseq_sig = 0;
2233 t->rseq_event_mask = 0;
2234 } else {
2235 t->rseq = current->rseq;
2236 t->rseq_sig = current->rseq_sig;
2237 t->rseq_event_mask = current->rseq_event_mask;
2238 }
2239}
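/*
 * rseq registration is per thread and tied to the address space: a new thread
 * created with CLONE_VM starts unregistered, while a full fork() inherits the
 * parent's rseq area, signature and pending event mask.
 */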
2240
2241static inline void rseq_execve(struct task_struct *t)
2242{
2243 t->rseq = NULL;
2244 t->rseq_sig = 0;
2245 t->rseq_event_mask = 0;
2246}
2247
2248#else
2249
2250static inline void rseq_set_notify_resume(struct task_struct *t)
2251{
2252}
2253static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2254 struct pt_regs *regs)
2255{
2256}
2257static inline void rseq_signal_deliver(struct ksignal *ksig,
2258 struct pt_regs *regs)
2259{
2260}
2261static inline void rseq_preempt(struct task_struct *t)
2262{
2263}
2264static inline void rseq_migrate(struct task_struct *t)
2265{
2266}
2267static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2268{
2269}
2270static inline void rseq_execve(struct task_struct *t)
2271{
2272}
2273
2274#endif
2275
2276#ifdef CONFIG_DEBUG_RSEQ
2277
2278void rseq_syscall(struct pt_regs *regs);
2279
2280#else
2281
2282static inline void rseq_syscall(struct pt_regs *regs)
2283{
2284}
2285
2286#endif
2287
2288const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2289char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2290int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2291
2292const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2293const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2294const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2295
2296int sched_trace_rq_cpu(struct rq *rq);
2297int sched_trace_rq_cpu_capacity(struct rq *rq);
2298int sched_trace_rq_nr_running(struct rq *rq);
2299
2300const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2301
2302#ifdef CONFIG_SCHED_CORE
2303extern void sched_core_free(struct task_struct *tsk);
2304extern void sched_core_fork(struct task_struct *p);
2305extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2306 unsigned long uaddr);
2307#else
2308static inline void sched_core_free(struct task_struct *tsk) { }
2309static inline void sched_core_fork(struct task_struct *p) { }
2310#endif
2311
2312#endif
2313