#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/syscall_user_dispatch.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
#include <asm/kmap_size.h>
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
struct io_uring_task;

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state(): */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* Task state bits reported to user space, see task_state_index(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define is_special_task_state(state)					\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
		smp_store_mb(current->state, (state_value));		\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else

/*
 * set_current_state() includes a barrier (via smp_store_mb()) so that the
 * state write is ordered before a subsequent test of the wakeup condition;
 * it thereby pairs with the waker, which sets the condition before reading
 * the state.  A typical wait loop looks like:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * __set_current_state() is the non-serialized variant for cases where no
 * such ordering is needed.
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() is used for the stopped/traced/parked/dead states,
 * which cannot use the regular condition-based wait loop.  Taking ->pi_lock
 * serializes the state change against concurrent wakeups, so an in-flight
 * TASK_RUNNING store cannot collide with it.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif


#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
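
/*
 * A minimal usage sketch for the prepare/finish pair (assuming the caller
 * blocks in between, e.g. on a lock or completion):
 *
 *	int token = io_schedule_prepare();
 *	... sleep (schedule(), wait_for_completion(), ...) ...
 *	io_schedule_finish(token);
 *
 * io_schedule() and io_schedule_timeout() wrap this pattern around a plain
 * schedule()/schedule_timeout() call so the sleep is accounted as iowait.
 */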

struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as guests in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints: UCLAMP_MIN and UCLAMP_MAX index the
 * minimum and maximum utilization clamp values of a scheduling entity.
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Fixed-point shift/scale used for integer scheduler metrics such as
 * load and utilization averages.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/*
 * Estimation of a task's recent CPU utilization: 'enqueued' is the value
 * at the last enqueue, 'ewma' an exponentially weighted moving average of
 * past enqueued values (UTIL_EST_WEIGHT_SHIFT selects the decay weight).
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));

/*
 * Per-entity load tracking (PELT) sums and averages for load, runnable
 * time and utilization.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long			runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters, copied from the user-supplied
	 * sched_attr by sched_setattr():
	 */
	u64				dl_runtime;
	u64				dl_deadline;
	u64				dl_period;
	u64				dl_bw;
	u64				dl_density;

	/*
	 * Actual scheduling parameters, updated as the task executes:
	 * remaining runtime and absolute deadline of the current instance.
	 */
	s64				runtime;
	u64				deadline;
	unsigned int			flags;

	/*
	 * dl_throttled:       runtime exhausted, waiting for replenishment.
	 * dl_yielded:         task gave up the CPU before using all runtime.
	 * dl_non_contending:  the inactive timer is armed for this task.
	 * dl_overrun:         the task asked to be notified of runtime overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer; each -deadline task has its own
	 * bandwidth to enforce, hence one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, used to decrease the task's contribution to the
	 * active utilization at the "0-lag time" after it blocks.
	 */
	struct hrtimer			inactive_timer;

#ifdef CONFIG_RT_MUTEXES
	/*
	 * Priority inheritance: points at the scheduling parameters of the
	 * donor entity to be used while this task is boosted.
	 */
	struct sched_dl_entity		*pi_se;
#endif
};

#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias): */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity: the requested clamp value,
 * the bucket it currently maps to, and whether the clamp is active and/or
 * was explicitly requested from user space.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_hint;
		u8			need_mb;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
	int				idx;
	pte_t				pteval[KM_MAX_IDX];
#endif
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif

	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/* This begins the randomizable portion of task_struct: */
	randomized_struct_fields_start

	void				*stack;
	refcount_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	int				on_cpu;
	struct __call_single_node	wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	int				recent_used_cpu;
	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_UCLAMP_TASK
	/* Clamp values requested for this task: */
	struct uclamp_se		uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for this task: */
	struct uclamp_se		uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	const cpumask_t			*cpus_ptr;
	cpumask_t			cpus_mask;
	void				*migration_pending;
#ifdef CONFIG_SMP
	unsigned short			migration_disabled;
#endif
	unsigned short			migration_flags;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	int				trc_reader_nesting;
	int				trc_ipi_to_cpu;
	union rcu_special		trc_reader_special;
	bool				trc_reader_checked;
	struct list_head		trc_holdout_list;
#endif

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;

	int				pdeath_signal;

	unsigned long			jobctl;

	unsigned int			personality;

	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
#ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned			:0;

	unsigned			sched_remote_wakeup:1;

	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	unsigned			no_cgroup_migration:1;
	unsigned			frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned			use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	unsigned			in_memstall:1;
#endif

	unsigned long			atomic_flags;

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_STACKPROTECTOR
	unsigned long			stack_canary;
#endif

	struct task_struct __rcu	*real_parent;

	struct task_struct __rcu	*parent;

	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	struct pid			*thread_pid;
	struct hlist_node		pid_links[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	int __user			*set_child_tid;

	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif

	unsigned long			nvcsw;
	unsigned long			nivcsw;

	u64				start_time;

	u64				start_boottime;

	unsigned long			min_flt;
	unsigned long			maj_flt;

	struct posix_cputimers		posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
	struct posix_cputimers_work	posix_cputimers_work;
#endif

	const struct cred __rcu		*ptracer_cred;

	const struct cred __rcu		*real_cred;

	const struct cred __rcu		*cred;

#ifdef CONFIG_KEYS
	struct key			*cached_requested_key;
#endif

	char				comm[TASK_COMM_LEN];

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
	unsigned long			last_switch_time;
#endif
	struct fs_struct		*fs;

	struct files_struct		*files;

#ifdef CONFIG_IO_URING
	struct io_uring_task		*io_uring;
#endif

	struct nsproxy			*nsproxy;

	struct signal_struct		*signal;
	struct sighand_struct __rcu	*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;

	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context		*audit_context;
#endif
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;
	struct syscall_user_dispatch	syscall_dispatch;

	u64				parent_exec_id;
	u64				self_exec_id;

	spinlock_t			alloc_lock;

	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	struct rb_root_cached		pi_waiters;

	struct task_struct		*pi_top_task;

	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int				non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		irqtrace;
	unsigned int			hardirq_threaded;
	u64				hardirq_chain_key;
	int				softirqs_enabled;
	int				softirq_context;
	int				irq_config;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
	unsigned int			in_ubsan;
#endif

	void				*journal_info;

	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	struct blk_plug			*plug;
#endif

	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control		*capture_control;
#endif

	unsigned long			ptrace_message;
	kernel_siginfo_t		*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_PSI
	unsigned int			psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	u64				acct_rss_mem1;

	u64				acct_vm_mem1;

	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t			mems_allowed;

	seqcount_spinlock_t		mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	struct css_set __rcu		*cgroups;

	struct list_head		cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32				closid;
	u32				rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user	*robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head		pi_state_list;
	struct futex_pi_state		*pi_state_cache;
	struct mutex			futex_exit_mutex;
	unsigned int			futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
	struct mutex			perf_event_mutex;
	struct list_head		perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long			preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy		*mempolicy;
	short				il_prev;
	short				pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int				numa_scan_seq;
	unsigned int			numa_scan_period;
	unsigned int			numa_scan_period_max;
	int				numa_preferred_nid;
	unsigned long			numa_migrate_retry;

	u64				node_stamp;
	u64				last_task_numa_placement;
	u64				last_sum_exec_runtime;
	struct callback_head		numa_work;

	struct numa_group __rcu		*numa_group;

	unsigned long			*numa_faults;
	unsigned long			total_numa_faults;

	unsigned long			numa_faults_locality[3];

	unsigned long			numa_pages_migrated;
#endif

#ifdef CONFIG_RSEQ
	struct rseq __user		*rseq;
	u32				rseq_sig;

	unsigned long			rseq_event_mask;
#endif

	struct tlbflush_unmap_batch	tlb_ubc;

	union {
		refcount_t		rcu_users;
		struct rcu_head		rcu;
	};

	struct pipe_inode_info		*splice_pipe;

	struct page_frag		task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info		*delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int				make_it_fail;
	unsigned int			fail_nth;
#endif

	int				nr_dirtied;
	int				nr_dirtied_pause;

	unsigned long			dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int				latency_record_count;
	struct latency_record		latency_record[LT_SAVECOUNT];
#endif

	u64				timer_slack_ns;
	u64				default_timer_slack_ns;

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	unsigned int			kasan_depth;
#endif

#ifdef CONFIG_KCSAN
	struct kcsan_ctx		kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events		kcsan_save_irqtrace;
#endif
#endif

#if IS_ENABLED(CONFIG_KUNIT)
	struct kunit			*kunit_test;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int				curr_ret_stack;
	int				curr_ret_depth;

	struct ftrace_ret_stack		*ret_stack;

	unsigned long long		ftrace_timestamp;

	atomic_t			trace_overrun;

	atomic_t			tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	unsigned long			trace;

	unsigned long			trace_recursion;
#endif

#ifdef CONFIG_KCOV
	unsigned int			kcov_mode;

	unsigned int			kcov_size;

	void				*kcov_area;

	struct kcov			*kcov;

	u64				kcov_handle;

	int				kcov_sequence;

	unsigned int			kcov_softirq;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup		*memcg_in_oom;
	gfp_t				memcg_oom_gfp_mask;
	int				memcg_oom_order;

	unsigned int			memcg_nr_pages_over_high;

	struct mem_cgroup		*active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct request_queue		*throttle_queue;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task		*utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int			sequential_io;
	unsigned int			sequential_io_avg;
#endif
	struct kmap_ctrl		kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long			task_state_change;
#endif
	int				pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct		*oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct		*stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_t			stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int				patch_state;
#endif
#ifdef CONFIG_SECURITY
	void				*security;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long			lowest_stack;
	unsigned long			prev_lowest_stack;
#endif

#ifdef CONFIG_X86_MCE
	void __user			*mce_vaddr;
	__u64				mce_kflags;
	u64				mce_addr;
	__u64				mce_ripv : 1,
					mce_whole_page : 1,
					__mce_reserved : 62;
	struct callback_head		mce_kill_me;
#endif

#ifdef CONFIG_KRETPROBES
	struct llist_head               kretprobe_instances;
#endif

	/* This marks the end of the randomizable portion of task_struct: */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct		thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->thread_pid;
}

/*
 * The helpers below return a task's pid numbers as seen from different
 * pid namespaces:
 *
 *  task_xxx_nr()     : global id, i.e. the id seen from the init namespace;
 *  task_xxx_vnr()    : virtual id, i.e. the id seen from the pid namespace
 *                      of current;
 *  task_xxx_nr_ns()  : id seen from the namespace specified by @ns.
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

/*
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Tests whether the task still has its struct pid attached, i.e. it has not
 * been detached by release_task().  The other pid helpers must not be used
 * on a task for which this returns false.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return task_index_to_char_impl_placeholder(state); /* see below */
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
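
/*
 * task_state_index() collapses ->state/->exit_state into an index into the
 * TASK_REPORT bits (with TASK_IDLE reported as its own index), and
 * task_index_to_char()/task_state_to_char() map that index onto the
 * single-letter state codes "RSDTtXZPI" seen e.g. in /proc/<pid>/stat.
 */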

/*
 * is_global_init - check if a task structure is the init task of the initial
 * pid namespace.  Since init is free to have sub-threads, the check is done
 * on the thread group id.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

/* Process that receives SIGINT on Ctrl-Alt-Del: */
extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_VCPU			0x00000001
#define PF_IDLE			0x00000002
#define PF_EXITING		0x00000004
#define PF_IO_WORKER		0x00000010
#define PF_WQ_WORKER		0x00000020
#define PF_FORKNOEXEC		0x00000040
#define PF_MCE_PROCESS		0x00000080
#define PF_SUPERPRIV		0x00000100
#define PF_DUMPCORE		0x00000200
#define PF_SIGNALED		0x00000400
#define PF_MEMALLOC		0x00000800
#define PF_NPROC_EXCEEDED	0x00001000
#define PF_USED_MATH		0x00002000
#define PF_USED_ASYNC		0x00004000
#define PF_NOFREEZE		0x00008000
#define PF_FROZEN		0x00010000
#define PF_KSWAPD		0x00020000
#define PF_MEMALLOC_NOFS	0x00040000
#define PF_MEMALLOC_NOIO	0x00080000
#define PF_LOCAL_THROTTLE	0x00100000

#define PF_KTHREAD		0x00200000
#define PF_RANDOMIZE		0x00400000
#define PF_SWAPWRITE		0x00800000
#define PF_NO_SETAFFINITY	0x04000000
#define PF_MCE_EARLY		0x08000000
#define PF_MEMALLOC_NOCMA	0x10000000
#define PF_FREEZER_SKIP		0x40000000
#define PF_SUSPEND_TASK		0x80000000

#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags (bit numbers in ->atomic_flags): */
#define PFA_NO_NEW_PRIVS		0
#define PFA_SPREAD_PAGE			1
#define PFA_SPREAD_SLAB			2
#define PFA_SPEC_SSB_DISABLE		3
#define PFA_SPEC_SSB_FORCE_DISABLE	4
#define PFA_SPEC_IB_DISABLE		5
#define PFA_SPEC_IB_FORCE_DISABLE	6
#define PFA_SPEC_SSB_NOEXEC		7

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
	__get_task_comm(buf, sizeof(buf), tsk);		\
})
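
/*
 * Note: the buffer passed to get_task_comm() must be a char array of exactly
 * TASK_COMM_LEN bytes; the BUILD_BUG_ON() above rejects pointers and
 * differently sized buffers at compile time.
 */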

#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return value
 * indicates whether a reschedule was done.
 */
#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
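
/*
 * Under CONFIG_PREEMPTION the kernel can reschedule at almost any point, so
 * _cond_resched() compiles to a no-op above and cond_resched() then only
 * keeps the ___might_sleep() debugging check.
 */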

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?  (Technically this does not depend on CONFIG_PREEMPTION,
 * but a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_RSEQ

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If the parent process has a registered restartable sequences area, the
 * child inherits it; rseq is unregistered for a clone with CLONE_VM set.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}
#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);
int sched_trace_rq_cpu_capacity(struct rq *rq);
int sched_trace_rq_nr_running(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#endif