1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/kcov.h>
18#include <linux/mutex.h>
19#include <linux/plist.h>
20#include <linux/hrtimer.h>
21#include <linux/irqflags.h>
22#include <linux/seccomp.h>
23#include <linux/nodemask.h>
24#include <linux/rcupdate.h>
25#include <linux/refcount.h>
26#include <linux/resource.h>
27#include <linux/latencytop.h>
28#include <linux/sched/prio.h>
29#include <linux/sched/types.h>
30#include <linux/signal_types.h>
31#include <linux/mm_types_task.h>
32#include <linux/task_io_accounting.h>
33#include <linux/posix-timers.h>
34#include <linux/rseq.h>
35#include <linux/seqlock.h>
36#include <linux/kcsan.h>
37
38
39struct audit_context;
40struct backing_dev_info;
41struct bio_list;
42struct blk_plug;
43struct capture_control;
44struct cfs_rq;
45struct fs_struct;
46struct futex_pi_state;
47struct io_context;
48struct mempolicy;
49struct nameidata;
50struct nsproxy;
51struct perf_event_context;
52struct pid_namespace;
53struct pipe_inode_info;
54struct rcu_node;
55struct reclaim_state;
56struct robust_list_head;
57struct root_domain;
58struct rq;
59struct sched_attr;
60struct sched_param;
61struct seq_file;
62struct sighand_struct;
63struct signal_struct;
64struct task_delay_info;
65struct task_group;
66struct io_uring_task;
67
68
69
70
71
72
73
74
75
76
77
78
79
80#define TASK_RUNNING 0x0000
81#define TASK_INTERRUPTIBLE 0x0001
82#define TASK_UNINTERRUPTIBLE 0x0002
83#define __TASK_STOPPED 0x0004
84#define __TASK_TRACED 0x0008
85
86#define EXIT_DEAD 0x0010
87#define EXIT_ZOMBIE 0x0020
88#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
89
90#define TASK_PARKED 0x0040
91#define TASK_DEAD 0x0080
92#define TASK_WAKEKILL 0x0100
93#define TASK_WAKING 0x0200
94#define TASK_NOLOAD 0x0400
95#define TASK_NEW 0x0800
96#define TASK_STATE_MAX 0x1000
97
98
99#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
100#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
101#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
102
103#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
104
105
106#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
107
108
109#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
110 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
111 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
112 TASK_PARKED)
113
114#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
115
116#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
117
118#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
119
120#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
121
122
123
124
125
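/*
 * Special states (__TASK_STOPPED, __TASK_TRACED, TASK_PARKED, TASK_DEAD) do
 * not follow the normal wait-loop pattern and must only be set via
 * set_special_state(), which takes ->pi_lock to serialize against
 * concurrent wakeups.
 */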
126#define is_special_task_state(state) \
127 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
128
129#define __set_current_state(state_value) \
130 do { \
131 WARN_ON_ONCE(is_special_task_state(state_value));\
132 current->task_state_change = _THIS_IP_; \
133 current->state = (state_value); \
134 } while (0)
135
136#define set_current_state(state_value) \
137 do { \
138 WARN_ON_ONCE(is_special_task_state(state_value));\
139 current->task_state_change = _THIS_IP_; \
140 smp_store_mb(current->state, (state_value)); \
141 } while (0)
142
143#define set_special_state(state_value) \
144 do { \
145 unsigned long flags; \
146 WARN_ON_ONCE(!is_special_task_state(state_value)); \
147 raw_spin_lock_irqsave(&current->pi_lock, flags); \
148 current->task_state_change = _THIS_IP_; \
149 current->state = (state_value); \
150 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
151 } while (0)
152#else
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190#define __set_current_state(state_value) \
191 current->state = (state_value)
192
193#define set_current_state(state_value) \
194 smp_store_mb(current->state, (state_value))
195
196
197
198
199
200
201
202#define set_special_state(state_value) \
203 do { \
204 unsigned long flags; \
205 raw_spin_lock_irqsave(&current->pi_lock, flags); \
206 current->state = (state_value); \
207 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
208 } while (0)
209
210#endif
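/*
 * Usage sketch (illustrative; CONDITION is a placeholder for the caller's
 * own predicate). This is the wait/wake pattern the macros above exist for:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() uses smp_store_mb(), so the store of ->state is
 * ordered against the subsequent read of CONDITION; the waker sets
 * CONDITION before calling wake_up_process(), which pairs with that barrier.
 */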
211
212
213#define TASK_COMM_LEN 16
214
215extern void scheduler_tick(void);
216
217#define MAX_SCHEDULE_TIMEOUT LONG_MAX
218
219extern long schedule_timeout(long timeout);
220extern long schedule_timeout_interruptible(long timeout);
221extern long schedule_timeout_killable(long timeout);
222extern long schedule_timeout_uninterruptible(long timeout);
223extern long schedule_timeout_idle(long timeout);
224asmlinkage void schedule(void);
225extern void schedule_preempt_disabled(void);
226asmlinkage void preempt_schedule_irq(void);
227
228extern int __must_check io_schedule_prepare(void);
229extern void io_schedule_finish(int token);
230extern long io_schedule_timeout(long timeout);
231extern void io_schedule(void);
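/*
 * Usage sketch (illustrative, not part of this header): sleep for roughly
 * one second unless a fatal signal arrives:
 *
 *	long left = schedule_timeout_killable(HZ);
 *
 * The schedule_timeout*() helpers return the number of jiffies remaining if
 * the task was woken early, or 0 once the full timeout has elapsed; passing
 * MAX_SCHEDULE_TIMEOUT means "sleep until explicitly woken".
 */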
232
233
234
235
236
237
238
239
240
241
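/*
 * struct prev_cputime: snapshot of the utime/stime values last reported to
 * user space, kept so that subsequent readings can be guaranteed monotonic;
 * unnecessary with native vtime accounting, hence the #ifndef below.
 */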
242struct prev_cputime {
243#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
244 u64 utime;
245 u64 stime;
246 raw_spinlock_t lock;
247#endif
248};
249
250enum vtime_state {
251
252 VTIME_INACTIVE = 0,
253
254 VTIME_IDLE,
255
256 VTIME_SYS,
257
258 VTIME_USER,
259
260 VTIME_GUEST,
261};
262
263struct vtime {
264 seqcount_t seqcount;
265 unsigned long long starttime;
266 enum vtime_state state;
267 unsigned int cpu;
268 u64 utime;
269 u64 stime;
270 u64 gtime;
271};
272
273
274
275
276
277
278
279enum uclamp_id {
280 UCLAMP_MIN = 0,
281 UCLAMP_MAX,
282 UCLAMP_CNT
283};
284
285#ifdef CONFIG_SMP
286extern struct root_domain def_root_domain;
287extern struct mutex sched_domains_mutex;
288#endif
289
290struct sched_info {
291#ifdef CONFIG_SCHED_INFO
292
293
294
295 unsigned long pcount;
296
297
298 unsigned long long run_delay;
299
300
301
302
303 unsigned long long last_arrival;
304
305
306 unsigned long long last_queued;
307
308#endif
309};
310
311
312
313
314
315
316
317
318# define SCHED_FIXEDPOINT_SHIFT 10
319# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
320
321
322# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
323# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
324
325struct load_weight {
326 unsigned long weight;
327 u32 inv_weight;
328};
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352struct util_est {
353 unsigned int enqueued;
354 unsigned int ewma;
355#define UTIL_EST_WEIGHT_SHIFT 2
356} __attribute__((__aligned__(sizeof(u64))));
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
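/*
 * struct sched_avg carries the PELT (per-entity load tracking) signals:
 * load_sum/runnable_sum/util_sum are geometrically decayed sums accumulated
 * over 1024us periods, load_avg/runnable_avg/util_avg are the averages
 * derived from them, and util_est caches the utilization estimate described
 * by struct util_est above. last_update_time and period_contrib record
 * where the currently running accumulation period left off.
 */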
403struct sched_avg {
404 u64 last_update_time;
405 u64 load_sum;
406 u64 runnable_sum;
407 u32 util_sum;
408 u32 period_contrib;
409 unsigned long load_avg;
410 unsigned long runnable_avg;
411 unsigned long util_avg;
412 struct util_est util_est;
413} ____cacheline_aligned;
414
415struct sched_statistics {
416#ifdef CONFIG_SCHEDSTATS
417 u64 wait_start;
418 u64 wait_max;
419 u64 wait_count;
420 u64 wait_sum;
421 u64 iowait_count;
422 u64 iowait_sum;
423
424 u64 sleep_start;
425 u64 sleep_max;
426 s64 sum_sleep_runtime;
427
428 u64 block_start;
429 u64 block_max;
430 u64 exec_max;
431 u64 slice_max;
432
433 u64 nr_migrations_cold;
434 u64 nr_failed_migrations_affine;
435 u64 nr_failed_migrations_running;
436 u64 nr_failed_migrations_hot;
437 u64 nr_forced_migrations;
438
439 u64 nr_wakeups;
440 u64 nr_wakeups_sync;
441 u64 nr_wakeups_migrate;
442 u64 nr_wakeups_local;
443 u64 nr_wakeups_remote;
444 u64 nr_wakeups_affine;
445 u64 nr_wakeups_affine_attempts;
446 u64 nr_wakeups_passive;
447 u64 nr_wakeups_idle;
448#endif
449};
450
451struct sched_entity {
452
453 struct load_weight load;
454 struct rb_node run_node;
455 struct list_head group_node;
456 unsigned int on_rq;
457
458 u64 exec_start;
459 u64 sum_exec_runtime;
460 u64 vruntime;
461 u64 prev_sum_exec_runtime;
462
463 u64 nr_migrations;
464
465 struct sched_statistics statistics;
466
467#ifdef CONFIG_FAIR_GROUP_SCHED
468 int depth;
469 struct sched_entity *parent;
470
471 struct cfs_rq *cfs_rq;
472
473 struct cfs_rq *my_q;
474
475 unsigned long runnable_weight;
476#endif
477
478#ifdef CONFIG_SMP
479
480
481
482
483
484
485 struct sched_avg avg;
486#endif
487};
488
489struct sched_rt_entity {
490 struct list_head run_list;
491 unsigned long timeout;
492 unsigned long watchdog_stamp;
493 unsigned int time_slice;
494 unsigned short on_rq;
495 unsigned short on_list;
496
497 struct sched_rt_entity *back;
498#ifdef CONFIG_RT_GROUP_SCHED
499 struct sched_rt_entity *parent;
500
501 struct rt_rq *rt_rq;
502
503 struct rt_rq *my_q;
504#endif
505} __randomize_layout;
506
507struct sched_dl_entity {
508 struct rb_node rb_node;
509
510
511
512
513
514
515 u64 dl_runtime;
516 u64 dl_deadline;
517 u64 dl_period;
518 u64 dl_bw;
519 u64 dl_density;
520
521
522
523
524
525
526 s64 runtime;
527 u64 deadline;
528 unsigned int flags;
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
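/*
 * Condensed summary of the flag bits below:
 *  - dl_throttled:      runtime exhausted, waiting for the replenishment
 *                       timer (dl_timer) to fire;
 *  - dl_yielded:        the task yielded and gave up its remaining runtime
 *                       for the current period;
 *  - dl_non_contending: the inactive_timer is armed while the task blocks,
 *                       for 0-lag bandwidth accounting;
 *  - dl_overrun:        a runtime overrun was detected and is reported when
 *                       SCHED_FLAG_DL_OVERRUN is requested.
 */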
554 unsigned int dl_throttled : 1;
555 unsigned int dl_yielded : 1;
556 unsigned int dl_non_contending : 1;
557 unsigned int dl_overrun : 1;
558
559
560
561
562
563 struct hrtimer dl_timer;
564
565
566
567
568
569
570
571
572 struct hrtimer inactive_timer;
573
574#ifdef CONFIG_RT_MUTEXES
575
576
577
578
579
580 struct sched_dl_entity *pi_se;
581#endif
582};
583
584#ifdef CONFIG_UCLAMP_TASK
585
586#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
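/*
 * struct uclamp_se: one utilization-clamp setting per clamp index
 * (UCLAMP_MIN or UCLAMP_MAX). value lies in [0, SCHED_CAPACITY_SCALE],
 * bucket_id is the clamp bucket it maps to, active flags that the clamp is
 * currently accounted on a runqueue, and user_defined distinguishes a value
 * requested explicitly from user space from one inherited from defaults.
 */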
611struct uclamp_se {
612 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
613 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
614 unsigned int active : 1;
615 unsigned int user_defined : 1;
616};
617#endif
618
619union rcu_special {
620 struct {
621 u8 blocked;
622 u8 need_qs;
623 u8 exp_hint;
624 u8 need_mb;
625 } b;
626 u32 s;
627};
628
629enum perf_event_task_context {
630 perf_invalid_context = -1,
631 perf_hw_context = 0,
632 perf_sw_context,
633 perf_nr_task_contexts,
634};
635
636struct wake_q_node {
637 struct wake_q_node *next;
638};
639
640struct task_struct {
641#ifdef CONFIG_THREAD_INFO_IN_TASK
642
643
644
645
646 struct thread_info thread_info;
647#endif
648
649 volatile long state;
650
651
652
653
654
655 randomized_struct_fields_start
656
657 void *stack;
658 refcount_t usage;
659
660 unsigned int flags;
661 unsigned int ptrace;
662
663#ifdef CONFIG_SMP
664 int on_cpu;
665 struct __call_single_node wake_entry;
666#ifdef CONFIG_THREAD_INFO_IN_TASK
667
668 unsigned int cpu;
669#endif
670 unsigned int wakee_flips;
671 unsigned long wakee_flip_decay_ts;
672 struct task_struct *last_wakee;
673
674
675
676
677
678
679
680
681 int recent_used_cpu;
682 int wake_cpu;
683#endif
684 int on_rq;
685
686 int prio;
687 int static_prio;
688 int normal_prio;
689 unsigned int rt_priority;
690
691 const struct sched_class *sched_class;
692 struct sched_entity se;
693 struct sched_rt_entity rt;
694#ifdef CONFIG_CGROUP_SCHED
695 struct task_group *sched_task_group;
696#endif
697 struct sched_dl_entity dl;
698
699#ifdef CONFIG_UCLAMP_TASK
700
701
702
703
704 struct uclamp_se uclamp_req[UCLAMP_CNT];
705
706
707
708
709 struct uclamp_se uclamp[UCLAMP_CNT];
710#endif
711
712#ifdef CONFIG_PREEMPT_NOTIFIERS
713
714 struct hlist_head preempt_notifiers;
715#endif
716
717#ifdef CONFIG_BLK_DEV_IO_TRACE
718 unsigned int btrace_seq;
719#endif
720
721 unsigned int policy;
722 int nr_cpus_allowed;
723 const cpumask_t *cpus_ptr;
724 cpumask_t cpus_mask;
725
726#ifdef CONFIG_PREEMPT_RCU
727 int rcu_read_lock_nesting;
728 union rcu_special rcu_read_unlock_special;
729 struct list_head rcu_node_entry;
730 struct rcu_node *rcu_blocked_node;
731#endif
732
733#ifdef CONFIG_TASKS_RCU
734 unsigned long rcu_tasks_nvcsw;
735 u8 rcu_tasks_holdout;
736 u8 rcu_tasks_idx;
737 int rcu_tasks_idle_cpu;
738 struct list_head rcu_tasks_holdout_list;
739#endif
740
741#ifdef CONFIG_TASKS_TRACE_RCU
742 int trc_reader_nesting;
743 int trc_ipi_to_cpu;
744 union rcu_special trc_reader_special;
745 bool trc_reader_checked;
746 struct list_head trc_holdout_list;
747#endif
748
749 struct sched_info sched_info;
750
751 struct list_head tasks;
752#ifdef CONFIG_SMP
753 struct plist_node pushable_tasks;
754 struct rb_node pushable_dl_tasks;
755#endif
756
757 struct mm_struct *mm;
758 struct mm_struct *active_mm;
759
760
761 struct vmacache vmacache;
762
763#ifdef SPLIT_RSS_COUNTING
764 struct task_rss_stat rss_stat;
765#endif
766 int exit_state;
767 int exit_code;
768 int exit_signal;
769
770 int pdeath_signal;
771
772 unsigned long jobctl;
773
774
775 unsigned int personality;
776
777
778 unsigned sched_reset_on_fork:1;
779 unsigned sched_contributes_to_load:1;
780 unsigned sched_migrated:1;
781#ifdef CONFIG_PSI
782 unsigned sched_psi_wake_requeue:1;
783#endif
784
785
786 unsigned :0;
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803 unsigned sched_remote_wakeup:1;
804
805
806 unsigned in_execve:1;
807 unsigned in_iowait:1;
808#ifndef TIF_RESTORE_SIGMASK
809 unsigned restore_sigmask:1;
810#endif
811#ifdef CONFIG_MEMCG
812 unsigned in_user_fault:1;
813#endif
814#ifdef CONFIG_COMPAT_BRK
815 unsigned brk_randomized:1;
816#endif
817#ifdef CONFIG_CGROUPS
818
819 unsigned no_cgroup_migration:1;
820
821 unsigned frozen:1;
822#endif
823#ifdef CONFIG_BLK_CGROUP
824 unsigned use_memdelay:1;
825#endif
826#ifdef CONFIG_PSI
827
828 unsigned in_memstall:1;
829#endif
830
831 unsigned long atomic_flags;
832
833 struct restart_block restart_block;
834
835 pid_t pid;
836 pid_t tgid;
837
838#ifdef CONFIG_STACKPROTECTOR
839
840 unsigned long stack_canary;
841#endif
842
843
844
845
846
847
848
849 struct task_struct __rcu *real_parent;
850
851
852 struct task_struct __rcu *parent;
853
854
855
856
857 struct list_head children;
858 struct list_head sibling;
859 struct task_struct *group_leader;
860
861
862
863
864
865
866
867 struct list_head ptraced;
868 struct list_head ptrace_entry;
869
870
871 struct pid *thread_pid;
872 struct hlist_node pid_links[PIDTYPE_MAX];
873 struct list_head thread_group;
874 struct list_head thread_node;
875
876 struct completion *vfork_done;
877
878
879 int __user *set_child_tid;
880
881
882 int __user *clear_child_tid;
883
884 u64 utime;
885 u64 stime;
886#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
887 u64 utimescaled;
888 u64 stimescaled;
889#endif
890 u64 gtime;
891 struct prev_cputime prev_cputime;
892#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
893 struct vtime vtime;
894#endif
895
896#ifdef CONFIG_NO_HZ_FULL
897 atomic_t tick_dep_mask;
898#endif
899
900 unsigned long nvcsw;
901 unsigned long nivcsw;
902
903
904 u64 start_time;
905
906
907 u64 start_boottime;
908
909
910 unsigned long min_flt;
911 unsigned long maj_flt;
912
913
914 struct posix_cputimers posix_cputimers;
915
916#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
917 struct posix_cputimers_work posix_cputimers_work;
918#endif
919
920
921
922
923 const struct cred __rcu *ptracer_cred;
924
925
926 const struct cred __rcu *real_cred;
927
928
929 const struct cred __rcu *cred;
930
931#ifdef CONFIG_KEYS
932
933 struct key *cached_requested_key;
934#endif
935
936
937
938
939
940
941
942
943 char comm[TASK_COMM_LEN];
944
945 struct nameidata *nameidata;
946
947#ifdef CONFIG_SYSVIPC
948 struct sysv_sem sysvsem;
949 struct sysv_shm sysvshm;
950#endif
951#ifdef CONFIG_DETECT_HUNG_TASK
952 unsigned long last_switch_count;
953 unsigned long last_switch_time;
954#endif
955
956 struct fs_struct *fs;
957
958
959 struct files_struct *files;
960
961#ifdef CONFIG_IO_URING
962 struct io_uring_task *io_uring;
963#endif
964
965
966 struct nsproxy *nsproxy;
967
968
969 struct signal_struct *signal;
970 struct sighand_struct __rcu *sighand;
971 sigset_t blocked;
972 sigset_t real_blocked;
973
974 sigset_t saved_sigmask;
975 struct sigpending pending;
976 unsigned long sas_ss_sp;
977 size_t sas_ss_size;
978 unsigned int sas_ss_flags;
979
980 struct callback_head *task_works;
981
982#ifdef CONFIG_AUDIT
983#ifdef CONFIG_AUDITSYSCALL
984 struct audit_context *audit_context;
985#endif
986 kuid_t loginuid;
987 unsigned int sessionid;
988#endif
989 struct seccomp seccomp;
990
991
992 u64 parent_exec_id;
993 u64 self_exec_id;
994
995
996 spinlock_t alloc_lock;
997
998
999 raw_spinlock_t pi_lock;
1000
1001 struct wake_q_node wake_q;
1002
1003#ifdef CONFIG_RT_MUTEXES
1004
1005 struct rb_root_cached pi_waiters;
1006
1007 struct task_struct *pi_top_task;
1008
1009 struct rt_mutex_waiter *pi_blocked_on;
1010#endif
1011
1012#ifdef CONFIG_DEBUG_MUTEXES
1013
1014 struct mutex_waiter *blocked_on;
1015#endif
1016
1017#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1018 int non_block_count;
1019#endif
1020
1021#ifdef CONFIG_TRACE_IRQFLAGS
1022 struct irqtrace_events irqtrace;
1023 unsigned int hardirq_threaded;
1024 u64 hardirq_chain_key;
1025 int softirqs_enabled;
1026 int softirq_context;
1027 int irq_config;
1028#endif
1029
1030#ifdef CONFIG_LOCKDEP
1031# define MAX_LOCK_DEPTH 48UL
1032 u64 curr_chain_key;
1033 int lockdep_depth;
1034 unsigned int lockdep_recursion;
1035 struct held_lock held_locks[MAX_LOCK_DEPTH];
1036#endif
1037
1038#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1039 unsigned int in_ubsan;
1040#endif
1041
1042
1043 void *journal_info;
1044
1045
1046 struct bio_list *bio_list;
1047
1048#ifdef CONFIG_BLOCK
1049
1050 struct blk_plug *plug;
1051#endif
1052
1053
1054 struct reclaim_state *reclaim_state;
1055
1056 struct backing_dev_info *backing_dev_info;
1057
1058 struct io_context *io_context;
1059
1060#ifdef CONFIG_COMPACTION
1061 struct capture_control *capture_control;
1062#endif
1063
1064 unsigned long ptrace_message;
1065 kernel_siginfo_t *last_siginfo;
1066
1067 struct task_io_accounting ioac;
1068#ifdef CONFIG_PSI
1069
1070 unsigned int psi_flags;
1071#endif
1072#ifdef CONFIG_TASK_XACCT
1073
1074 u64 acct_rss_mem1;
1075
1076 u64 acct_vm_mem1;
1077
1078 u64 acct_timexpd;
1079#endif
1080#ifdef CONFIG_CPUSETS
1081
1082 nodemask_t mems_allowed;
1083
1084 seqcount_spinlock_t mems_allowed_seq;
1085 int cpuset_mem_spread_rotor;
1086 int cpuset_slab_spread_rotor;
1087#endif
1088#ifdef CONFIG_CGROUPS
1089
1090 struct css_set __rcu *cgroups;
1091
1092 struct list_head cg_list;
1093#endif
1094#ifdef CONFIG_X86_CPU_RESCTRL
1095 u32 closid;
1096 u32 rmid;
1097#endif
1098#ifdef CONFIG_FUTEX
1099 struct robust_list_head __user *robust_list;
1100#ifdef CONFIG_COMPAT
1101 struct compat_robust_list_head __user *compat_robust_list;
1102#endif
1103 struct list_head pi_state_list;
1104 struct futex_pi_state *pi_state_cache;
1105 struct mutex futex_exit_mutex;
1106 unsigned int futex_state;
1107#endif
1108#ifdef CONFIG_PERF_EVENTS
1109 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1110 struct mutex perf_event_mutex;
1111 struct list_head perf_event_list;
1112#endif
1113#ifdef CONFIG_DEBUG_PREEMPT
1114 unsigned long preempt_disable_ip;
1115#endif
1116#ifdef CONFIG_NUMA
1117
1118 struct mempolicy *mempolicy;
1119 short il_prev;
1120 short pref_node_fork;
1121#endif
1122#ifdef CONFIG_NUMA_BALANCING
1123 int numa_scan_seq;
1124 unsigned int numa_scan_period;
1125 unsigned int numa_scan_period_max;
1126 int numa_preferred_nid;
1127 unsigned long numa_migrate_retry;
1128
1129 u64 node_stamp;
1130 u64 last_task_numa_placement;
1131 u64 last_sum_exec_runtime;
1132 struct callback_head numa_work;
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142 struct numa_group __rcu *numa_group;
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158 unsigned long *numa_faults;
1159 unsigned long total_numa_faults;
1160
1161
1162
1163
1164
1165
1166
1167 unsigned long numa_faults_locality[3];
1168
1169 unsigned long numa_pages_migrated;
1170#endif
1171
1172#ifdef CONFIG_RSEQ
1173 struct rseq __user *rseq;
1174 u32 rseq_sig;
1175
1176
1177
1178
1179 unsigned long rseq_event_mask;
1180#endif
1181
1182 struct tlbflush_unmap_batch tlb_ubc;
1183
1184 union {
1185 refcount_t rcu_users;
1186 struct rcu_head rcu;
1187 };
1188
1189
1190 struct pipe_inode_info *splice_pipe;
1191
1192 struct page_frag task_frag;
1193
1194#ifdef CONFIG_TASK_DELAY_ACCT
1195 struct task_delay_info *delays;
1196#endif
1197
1198#ifdef CONFIG_FAULT_INJECTION
1199 int make_it_fail;
1200 unsigned int fail_nth;
1201#endif
1202
1203
1204
1205
1206 int nr_dirtied;
1207 int nr_dirtied_pause;
1208
1209 unsigned long dirty_paused_when;
1210
1211#ifdef CONFIG_LATENCYTOP
1212 int latency_record_count;
1213 struct latency_record latency_record[LT_SAVECOUNT];
1214#endif
1215
1216
1217
1218
1219 u64 timer_slack_ns;
1220 u64 default_timer_slack_ns;
1221
1222#ifdef CONFIG_KASAN
1223 unsigned int kasan_depth;
1224#endif
1225
1226#ifdef CONFIG_KCSAN
1227 struct kcsan_ctx kcsan_ctx;
1228#ifdef CONFIG_TRACE_IRQFLAGS
1229 struct irqtrace_events kcsan_save_irqtrace;
1230#endif
1231#endif
1232
1233#if IS_ENABLED(CONFIG_KUNIT)
1234 struct kunit *kunit_test;
1235#endif
1236
1237#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1238
1239 int curr_ret_stack;
1240 int curr_ret_depth;
1241
1242
1243 struct ftrace_ret_stack *ret_stack;
1244
1245
1246 unsigned long long ftrace_timestamp;
1247
1248
1249
1250
1251
1252 atomic_t trace_overrun;
1253
1254
1255 atomic_t tracing_graph_pause;
1256#endif
1257
1258#ifdef CONFIG_TRACING
1259
1260 unsigned long trace;
1261
1262
1263 unsigned long trace_recursion;
1264#endif
1265
1266#ifdef CONFIG_KCOV
1267
1268
1269
1270 unsigned int kcov_mode;
1271
1272
1273 unsigned int kcov_size;
1274
1275
1276 void *kcov_area;
1277
1278
1279 struct kcov *kcov;
1280
1281
1282 u64 kcov_handle;
1283
1284
1285 int kcov_sequence;
1286
1287
1288 unsigned int kcov_softirq;
1289#endif
1290
1291#ifdef CONFIG_MEMCG
1292 struct mem_cgroup *memcg_in_oom;
1293 gfp_t memcg_oom_gfp_mask;
1294 int memcg_oom_order;
1295
1296
1297 unsigned int memcg_nr_pages_over_high;
1298
1299
1300 struct mem_cgroup *active_memcg;
1301#endif
1302
1303#ifdef CONFIG_BLK_CGROUP
1304 struct request_queue *throttle_queue;
1305#endif
1306
1307#ifdef CONFIG_UPROBES
1308 struct uprobe_task *utask;
1309#endif
1310#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1311 unsigned int sequential_io;
1312 unsigned int sequential_io_avg;
1313#endif
1314#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1315 unsigned long task_state_change;
1316#endif
1317 int pagefault_disabled;
1318#ifdef CONFIG_MMU
1319 struct task_struct *oom_reaper_list;
1320#endif
1321#ifdef CONFIG_VMAP_STACK
1322 struct vm_struct *stack_vm_area;
1323#endif
1324#ifdef CONFIG_THREAD_INFO_IN_TASK
1325
1326 refcount_t stack_refcount;
1327#endif
1328#ifdef CONFIG_LIVEPATCH
1329 int patch_state;
1330#endif
1331#ifdef CONFIG_SECURITY
1332
1333 void *security;
1334#endif
1335
1336#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1337 unsigned long lowest_stack;
1338 unsigned long prev_lowest_stack;
1339#endif
1340
1341#ifdef CONFIG_X86_MCE
1342 void __user *mce_vaddr;
1343 __u64 mce_kflags;
1344 u64 mce_addr;
1345 __u64 mce_ripv : 1,
1346 mce_whole_page : 1,
1347 __mce_reserved : 62;
1348 struct callback_head mce_kill_me;
1349#endif
1350
1351
1352
1353
1354
1355 randomized_struct_fields_end
1356
1357
1358 struct thread_struct thread;
1359
1360
1361
1362
1363
1364
1365
1366};
1367
1368static inline struct pid *task_pid(struct task_struct *task)
1369{
1370 return task->thread_pid;
1371}
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
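/*
 * Helpers to obtain a task's ids as seen from different pid namespaces:
 *
 *	task_xid_nr()     : id as seen from the init namespace;
 *	task_xid_vnr()    : id as seen from the namespace the task belongs to;
 *	task_xid_nr_ns()  : id as seen from the namespace passed as argument;
 *
 * where "xid" is one of pid, tgid, pgrp or session.
 */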
1384pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1385
1386static inline pid_t task_pid_nr(struct task_struct *tsk)
1387{
1388 return tsk->pid;
1389}
1390
1391static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1392{
1393 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1394}
1395
1396static inline pid_t task_pid_vnr(struct task_struct *tsk)
1397{
1398 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1399}
1400
1401
1402static inline pid_t task_tgid_nr(struct task_struct *tsk)
1403{
1404 return tsk->tgid;
1405}
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417static inline int pid_alive(const struct task_struct *p)
1418{
1419 return p->thread_pid != NULL;
1420}
1421
1422static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1423{
1424 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1425}
1426
1427static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1428{
1429 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1430}
1431
1432
1433static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1434{
1435 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1436}
1437
1438static inline pid_t task_session_vnr(struct task_struct *tsk)
1439{
1440 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1441}
1442
1443static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1444{
1445 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1446}
1447
1448static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1449{
1450 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1451}
1452
1453static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1454{
1455 pid_t pid = 0;
1456
1457 rcu_read_lock();
1458 if (pid_alive(tsk))
1459 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1460 rcu_read_unlock();
1461
1462 return pid;
1463}
1464
1465static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1466{
1467 return task_ppid_nr_ns(tsk, &init_pid_ns);
1468}
1469
1470
1471static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1472{
1473 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1474}
1475
1476#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1477#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1478
1479static inline unsigned int task_state_index(struct task_struct *tsk)
1480{
1481 unsigned int tsk_state = READ_ONCE(tsk->state);
1482 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1483
1484 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1485
1486 if (tsk_state == TASK_IDLE)
1487 state = TASK_REPORT_IDLE;
1488
1489 return fls(state);
1490}
1491
1492static inline char task_index_to_char(unsigned int state)
1493{
1494 static const char state_char[] = "RSDTtXZPI";
1495
1496 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1497
1498 return state_char[state];
1499}
1500
1501static inline char task_state_to_char(struct task_struct *tsk)
1502{
1503 return task_index_to_char(task_state_index(tsk));
1504}
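/*
 * Usage sketch (illustrative): this is the single-letter state reported by
 * ps(1) and /proc/<pid>/stat, e.g.:
 *
 *	pr_info("%s[%d] is in state %c\n", tsk->comm,
 *		task_pid_nr(tsk), task_state_to_char(tsk));
 */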
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515static inline int is_global_init(struct task_struct *tsk)
1516{
1517 return task_tgid_nr(tsk) == 1;
1518}
1519
1520extern struct pid *cad_pid;
1521
1522
1523
1524
1525#define PF_VCPU 0x00000001
1526#define PF_IDLE 0x00000002
1527#define PF_EXITING 0x00000004
1528#define PF_IO_WORKER 0x00000010
1529#define PF_WQ_WORKER 0x00000020
1530#define PF_FORKNOEXEC 0x00000040
1531#define PF_MCE_PROCESS 0x00000080
1532#define PF_SUPERPRIV 0x00000100
1533#define PF_DUMPCORE 0x00000200
1534#define PF_SIGNALED 0x00000400
1535#define PF_MEMALLOC 0x00000800
1536#define PF_NPROC_EXCEEDED 0x00001000
1537#define PF_USED_MATH 0x00002000
1538#define PF_USED_ASYNC 0x00004000
1539#define PF_NOFREEZE 0x00008000
1540#define PF_FROZEN 0x00010000
1541#define PF_KSWAPD 0x00020000
1542#define PF_MEMALLOC_NOFS 0x00040000
1543#define PF_MEMALLOC_NOIO 0x00080000
1544#define PF_LOCAL_THROTTLE 0x00100000
1545
1546#define PF_KTHREAD 0x00200000
1547#define PF_RANDOMIZE 0x00400000
1548#define PF_SWAPWRITE 0x00800000
1549#define PF_NO_SETAFFINITY 0x04000000
1550#define PF_MCE_EARLY 0x08000000
1551#define PF_MEMALLOC_NOCMA 0x10000000
1552#define PF_FREEZER_SKIP 0x40000000
1553#define PF_SUSPEND_TASK 0x80000000
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1567#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1568#define clear_used_math() clear_stopped_child_used_math(current)
1569#define set_used_math() set_stopped_child_used_math(current)
1570
1571#define conditional_stopped_child_used_math(condition, child) \
1572 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1573
1574#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1575
1576#define copy_to_stopped_child_used_math(child) \
1577 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1578
1579
1580#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1581#define used_math() tsk_used_math(current)
1582
1583static inline bool is_percpu_thread(void)
1584{
1585#ifdef CONFIG_SMP
1586 return (current->flags & PF_NO_SETAFFINITY) &&
1587 (current->nr_cpus_allowed == 1);
1588#else
1589 return true;
1590#endif
1591}
1592
1593
1594#define PFA_NO_NEW_PRIVS 0
1595#define PFA_SPREAD_PAGE 1
1596#define PFA_SPREAD_SLAB 2
1597#define PFA_SPEC_SSB_DISABLE 3
1598#define PFA_SPEC_SSB_FORCE_DISABLE 4
1599#define PFA_SPEC_IB_DISABLE 5
1600#define PFA_SPEC_IB_FORCE_DISABLE 6
1601#define PFA_SPEC_SSB_NOEXEC 7
1602
1603#define TASK_PFA_TEST(name, func) \
1604 static inline bool task_##func(struct task_struct *p) \
1605 { return test_bit(PFA_##name, &p->atomic_flags); }
1606
1607#define TASK_PFA_SET(name, func) \
1608 static inline void task_set_##func(struct task_struct *p) \
1609 { set_bit(PFA_##name, &p->atomic_flags); }
1610
1611#define TASK_PFA_CLEAR(name, func) \
1612 static inline void task_clear_##func(struct task_struct *p) \
1613 { clear_bit(PFA_##name, &p->atomic_flags); }
1614
1615TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1616TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1617
1618TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1619TASK_PFA_SET(SPREAD_PAGE, spread_page)
1620TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1621
1622TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1623TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1624TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1625
1626TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1627TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1628TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1629
1630TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1631TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1632TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1633
1634TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1635TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1636
1637TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1638TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1639TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1640
1641TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1642TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
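/*
 * The TASK_PFA_* macros above generate inline accessors over
 * ->atomic_flags; for example TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
 * expands to task_no_new_privs(p). The *_FORCE_DISABLE flags deliberately
 * get no CLEAR helper, so once requested they can never be unset.
 */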
1643
1644static inline void
1645current_restore_flags(unsigned long orig_flags, unsigned long flags)
1646{
1647 current->flags &= ~flags;
1648 current->flags |= orig_flags & flags;
1649}
1650
1651extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1652extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1653#ifdef CONFIG_SMP
1654extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1655extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1656#else
1657static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1658{
1659}
1660static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1661{
1662 if (!cpumask_test_cpu(0, new_mask))
1663 return -EINVAL;
1664 return 0;
1665}
1666#endif
1667
1668extern int yield_to(struct task_struct *p, bool preempt);
1669extern void set_user_nice(struct task_struct *p, long nice);
1670extern int task_prio(const struct task_struct *p);
1671
1672
1673
1674
1675
1676
1677
1678static inline int task_nice(const struct task_struct *p)
1679{
1680 return PRIO_TO_NICE((p)->static_prio);
1681}
1682
1683extern int can_nice(const struct task_struct *p, const int nice);
1684extern int task_curr(const struct task_struct *p);
1685extern int idle_cpu(int cpu);
1686extern int available_idle_cpu(int cpu);
1687extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1688extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1689extern void sched_set_fifo(struct task_struct *p);
1690extern void sched_set_fifo_low(struct task_struct *p);
1691extern void sched_set_normal(struct task_struct *p, int nice);
1692extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1693extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1694extern struct task_struct *idle_task(int cpu);
1695
1696
1697
1698
1699
1700
1701
1702static __always_inline bool is_idle_task(const struct task_struct *p)
1703{
1704 return !!(p->flags & PF_IDLE);
1705}
1706
1707extern struct task_struct *curr_task(int cpu);
1708extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1709
1710void yield(void);
1711
1712union thread_union {
1713#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1714 struct task_struct task;
1715#endif
1716#ifndef CONFIG_THREAD_INFO_IN_TASK
1717 struct thread_info thread_info;
1718#endif
1719 unsigned long stack[THREAD_SIZE/sizeof(long)];
1720};
1721
1722#ifndef CONFIG_THREAD_INFO_IN_TASK
1723extern struct thread_info init_thread_info;
1724#endif
1725
1726extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1727
1728#ifdef CONFIG_THREAD_INFO_IN_TASK
1729static inline struct thread_info *task_thread_info(struct task_struct *task)
1730{
1731 return &task->thread_info;
1732}
1733#elif !defined(__HAVE_THREAD_FUNCTIONS)
1734# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1735#endif
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748extern struct task_struct *find_task_by_vpid(pid_t nr);
1749extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1750
1751
1752
1753
1754extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1755
1756extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1757extern int wake_up_process(struct task_struct *tsk);
1758extern void wake_up_new_task(struct task_struct *tsk);
1759
1760#ifdef CONFIG_SMP
1761extern void kick_process(struct task_struct *tsk);
1762#else
1763static inline void kick_process(struct task_struct *tsk) { }
1764#endif
1765
1766extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1767
1768static inline void set_task_comm(struct task_struct *tsk, const char *from)
1769{
1770 __set_task_comm(tsk, from, false);
1771}
1772
1773extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1774#define get_task_comm(buf, tsk) ({ \
1775 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1776 __get_task_comm(buf, sizeof(buf), tsk); \
1777})
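/*
 * Usage sketch (illustrative): the destination must be an actual array of
 * TASK_COMM_LEN bytes, because the macro checks sizeof(buf) at build time:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *	pr_debug("comm is %s\n", comm);
 */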
1778
1779#ifdef CONFIG_SMP
1780static __always_inline void scheduler_ipi(void)
1781{
1782
1783
1784
1785
1786
1787 preempt_fold_need_resched();
1788}
1789extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1790#else
1791static inline void scheduler_ipi(void) { }
1792static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1793{
1794 return 1;
1795}
1796#endif
1797
1798
1799
1800
1801
1802static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1803{
1804 set_ti_thread_flag(task_thread_info(tsk), flag);
1805}
1806
1807static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1808{
1809 clear_ti_thread_flag(task_thread_info(tsk), flag);
1810}
1811
1812static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1813 bool value)
1814{
1815 update_ti_thread_flag(task_thread_info(tsk), flag, value);
1816}
1817
1818static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1819{
1820 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1821}
1822
1823static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1824{
1825 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1826}
1827
1828static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1829{
1830 return test_ti_thread_flag(task_thread_info(tsk), flag);
1831}
1832
1833static inline void set_tsk_need_resched(struct task_struct *tsk)
1834{
1835 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1836}
1837
1838static inline void clear_tsk_need_resched(struct task_struct *tsk)
1839{
1840 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1841}
1842
1843static inline int test_tsk_need_resched(struct task_struct *tsk)
1844{
1845 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1846}
1847
1848
1849
1850
1851
1852
1853
1854#ifndef CONFIG_PREEMPTION
1855extern int _cond_resched(void);
1856#else
1857static inline int _cond_resched(void) { return 0; }
1858#endif
1859
1860#define cond_resched() ({ \
1861 ___might_sleep(__FILE__, __LINE__, 0); \
1862 _cond_resched(); \
1863})
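/*
 * Usage sketch (illustrative): long-running kernel loops drop in a
 * cond_resched() so that, on !CONFIG_PREEMPTION kernels, other runnable
 * tasks are not starved while the loop holds the CPU:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 *
 * process_item() and nr_items are placeholders for the caller's own work.
 */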
1864
1865extern int __cond_resched_lock(spinlock_t *lock);
1866
1867#define cond_resched_lock(lock) ({ \
1868 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1869 __cond_resched_lock(lock); \
1870})
1871
1872static inline void cond_resched_rcu(void)
1873{
1874#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1875 rcu_read_unlock();
1876 cond_resched();
1877 rcu_read_lock();
1878#endif
1879}
1880
1881
1882
1883
1884
1885
1886static inline int spin_needbreak(spinlock_t *lock)
1887{
1888#ifdef CONFIG_PREEMPTION
1889 return spin_is_contended(lock);
1890#else
1891 return 0;
1892#endif
1893}
1894
1895static __always_inline bool need_resched(void)
1896{
1897 return unlikely(tif_need_resched());
1898}
1899
1900
1901
1902
1903#ifdef CONFIG_SMP
1904
1905static inline unsigned int task_cpu(const struct task_struct *p)
1906{
1907#ifdef CONFIG_THREAD_INFO_IN_TASK
1908 return READ_ONCE(p->cpu);
1909#else
1910 return READ_ONCE(task_thread_info(p)->cpu);
1911#endif
1912}
1913
1914extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1915
1916#else
1917
1918static inline unsigned int task_cpu(const struct task_struct *p)
1919{
1920 return 0;
1921}
1922
1923static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1924{
1925}
1926
1927#endif
1928
1929
1930
1931
1932
1933
1934
1935
1936
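/*
 * vcpu_is_preempted() lets lock spinners ask whether the CPU they are
 * spinning on is a virtual CPU that the hypervisor has scheduled out.
 * Architectures that can answer this provide their own definition; the
 * fallback below conservatively reports "not preempted".
 */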
1937#ifndef vcpu_is_preempted
1938static inline bool vcpu_is_preempted(int cpu)
1939{
1940 return false;
1941}
1942#endif
1943
1944extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1945extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1946
1947#ifndef TASK_SIZE_OF
1948#define TASK_SIZE_OF(tsk) TASK_SIZE
1949#endif
1950
1951#ifdef CONFIG_RSEQ
1952
1953
1954
1955
1956
1957enum rseq_event_mask_bits {
1958 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1959 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1960 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1961};
1962
1963enum rseq_event_mask {
1964 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
1965 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
1966 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
1967};
1968
1969static inline void rseq_set_notify_resume(struct task_struct *t)
1970{
1971 if (t->rseq)
1972 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1973}
1974
1975void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1976
1977static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1978 struct pt_regs *regs)
1979{
1980 if (current->rseq)
1981 __rseq_handle_notify_resume(ksig, regs);
1982}
1983
1984static inline void rseq_signal_deliver(struct ksignal *ksig,
1985 struct pt_regs *regs)
1986{
1987 preempt_disable();
1988 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1989 preempt_enable();
1990 rseq_handle_notify_resume(ksig, regs);
1991}
1992
1993
1994static inline void rseq_preempt(struct task_struct *t)
1995{
1996 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1997 rseq_set_notify_resume(t);
1998}
1999
2000
2001static inline void rseq_migrate(struct task_struct *t)
2002{
2003 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2004 rseq_set_notify_resume(t);
2005}
2006
2007
2008
2009
2010
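/*
 * On fork: a child created with CLONE_VM (i.e. a new thread) starts with no
 * rseq registration, since registration is per thread; a full fork inherits
 * the parent's registration, signature and pending event mask.
 */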
2011static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2012{
2013 if (clone_flags & CLONE_VM) {
2014 t->rseq = NULL;
2015 t->rseq_sig = 0;
2016 t->rseq_event_mask = 0;
2017 } else {
2018 t->rseq = current->rseq;
2019 t->rseq_sig = current->rseq_sig;
2020 t->rseq_event_mask = current->rseq_event_mask;
2021 }
2022}
2023
2024static inline void rseq_execve(struct task_struct *t)
2025{
2026 t->rseq = NULL;
2027 t->rseq_sig = 0;
2028 t->rseq_event_mask = 0;
2029}
2030
2031#else
2032
2033static inline void rseq_set_notify_resume(struct task_struct *t)
2034{
2035}
2036static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2037 struct pt_regs *regs)
2038{
2039}
2040static inline void rseq_signal_deliver(struct ksignal *ksig,
2041 struct pt_regs *regs)
2042{
2043}
2044static inline void rseq_preempt(struct task_struct *t)
2045{
2046}
2047static inline void rseq_migrate(struct task_struct *t)
2048{
2049}
2050static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2051{
2052}
2053static inline void rseq_execve(struct task_struct *t)
2054{
2055}
2056
2057#endif
2058
2059#ifdef CONFIG_DEBUG_RSEQ
2060
2061void rseq_syscall(struct pt_regs *regs);
2062
2063#else
2064
2065static inline void rseq_syscall(struct pt_regs *regs)
2066{
2067}
2068
2069#endif
2070
2071const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2072char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2073int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2074
2075const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2076const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2077const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2078
2079int sched_trace_rq_cpu(struct rq *rq);
2080int sched_trace_rq_cpu_capacity(struct rq *rq);
2081int sched_trace_rq_nr_running(struct rq *rq);
2082
2083const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2084
2085#endif
2086