/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * interfaces:
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/irqflags.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)					\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)				\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
	} while (0)

#define set_current_state(state_value)					\
	do {								\
		WARN_ON_ONCE(is_special_task_state(state_value));	\
		current->task_state_change = _THIS_IP_;			\
		smp_store_mb(current->state, (state_value));		\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep.
 *
 * The barrier matches a full barrier in the waker (either an explicit
 * smp_mb() or the implied barrier of a successful try_to_wake_up()), so
 * that either the waker observes the sleeper's new ->state, or the sleeper
 * observes the condition it is waiting on. Without it a wakeup could be
 * missed. Also see the comments of try_to_wake_up().
 *
 * A canonical wait-loop using these helpers is sketched after the #endif
 * below.
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif
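
/*
 * Illustrative sketch (not additional kernel API): the canonical wait-loop
 * these helpers are designed for. set_current_state() embeds the barrier so
 * the state write is ordered before the condition re-check:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (CONDITION)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * CONDITION stands for whatever event the caller sleeps on; the matching
 * waker sets CONDITION and then calls wake_up_process() on the sleeper.
 */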
210
211
212#define TASK_COMM_LEN 16
213
214extern void scheduler_tick(void);
215
216#define MAX_SCHEDULE_TIMEOUT LONG_MAX
217
218extern long schedule_timeout(long timeout);
219extern long schedule_timeout_interruptible(long timeout);
220extern long schedule_timeout_killable(long timeout);
221extern long schedule_timeout_uninterruptible(long timeout);
222extern long schedule_timeout_idle(long timeout);
223asmlinkage void schedule(void);
224extern void schedule_preempt_disabled(void);
225asmlinkage void preempt_schedule_irq(void);
226
227extern int __must_check io_schedule_prepare(void);
228extern void io_schedule_finish(int token);
229extern long io_schedule_timeout(long timeout);
230extern void io_schedule(void);
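
/*
 * Usage sketch (illustrative; assumes msecs_to_jiffies() from
 * <linux/jiffies.h>): sleep for roughly 100ms, waking early on a signal:
 *
 *	long remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *	if (remaining)
 *		;	// woken early; 'remaining' jiffies were left over
 *
 * Passing MAX_SCHEDULE_TIMEOUT means "no timeout": schedule_timeout() then
 * blocks until something else wakes the task.
 */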

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as guests in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t seqcount;
	unsigned long long starttime;
	enum vtime_state state;
	unsigned int cpu;
	u64 utime;
	u64 stime;
	u64 gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long last_arrival;

	/* When were we last queued to run? */
	unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};
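
/*
 * Note (condensed from the fair-class math; hedged): ->inv_weight caches
 * 2^32 / ->weight so the hot path can replace a division by ->weight with
 * a multiply and a shift, roughly:
 *
 *	delta_exec * NICE_0_LOAD / weight
 *	  ~= (delta_exec * NICE_0_LOAD * inv_weight) >> 32
 *
 * See __calc_delta() in kernel/sched/fair.c for the real implementation.
 */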

/**
 * struct util_est - Estimation utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation, i.e. each time it goes
 * to sleep after running. The sample's weight is chosen so that the EWMA is
 * relatively insensitive to transient changes in the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and
 * CPUs: for tasks it is the task's estimated utilization at enqueue time,
 * for CPUs it is the sum of the estimated utilization of the currently
 * enqueued tasks.
 */
struct util_est {
	unsigned int enqueued;
	unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));
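
/*
 * Sketch of the EWMA update (hedged; see the util_est handling in
 * kernel/sched/fair.c for the real thing). With the sample weight
 * w = 1/2^UTIL_EST_WEIGHT_SHIFT = 1/4, a new utilization sample folds in as:
 *
 *	ewma(t) = w * task_util(t) + (1 - w) * ewma(t-1)
 *	        = ewma(t-1) + w * (task_util(t) - ewma(t-1))
 *
 * which needs only a subtraction and a shift, no multiply or divide.
 */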

/*
 * The load/runnable/util_avg track an exponentially decaying average of
 * the entity's recent history (PELT; see __update_load_avg_*() in
 * kernel/sched/pelt.c). Roughly:
 *
 *	load_avg     = runnable% * scale_load_down(load)
 *	runnable_avg = runnable% * SCHED_CAPACITY_SCALE
 *	util_avg     = running%  * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the fraction of time the entity spent runnable
 * (queued, including time waiting for a CPU) and running% the fraction
 * it spent actually executing. For a cfs_rq these signals aggregate the
 * attached entities.
 *
 * The *_sum fields carry the raw decayed accumulations; the *_avg fields
 * are the normalized values bounded by the corresponding scale.
 */
struct sched_avg {
	u64 last_update_time;
	u64 load_sum;
	u64 runnable_sum;
	u32 util_sum;
	u32 period_contrib;
	unsigned long load_avg;
	unsigned long runnable_avg;
	unsigned long util_avg;
	struct util_est util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight load;
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
	/* cached value of my_q->h_nr_running */
	unsigned long runnable_weight;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64 dl_runtime;		/* Maximum runtime for each instance	*/
	u64 dl_deadline;	/* Relative deadline of each instance	*/
	u64 dl_period;		/* Separation of two instances (period)	*/
	u64 dl_bw;		/* dl_runtime / dl_period		*/
	u64 dl_density;		/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64 runtime;		/* Remaining runtime for this instance	*/
	u64 deadline;		/* Absolute deadline for this instance	*/
	unsigned int flags;	/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance.
	 * If so we are outside bandwidth enforcement (but only until we
	 * exit the critical section).
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization; i.e. the inactive timer
	 * has been armed and its handler has not run yet.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int dl_throttled : 1;
	unsigned int dl_boosted : 1;
	unsigned int dl_yielded : 1;
	unsigned int dl_non_contending : 1;
	unsigned int dl_overrun : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to the active utilization until the "0-lag time", hence a timer
	 * is needed to decrease it at the correct time.
	 */
	struct hrtimer inactive_timer;
};
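
/*
 * Background note (condensed and hedged): SCHED_DEADLINE implements a
 * Constant Bandwidth Server on top of EDF. Each instance gets ->dl_runtime
 * of budget every ->dl_period, and admission control rejects task sets
 * whose total bandwidth exceeds the available capacity, roughly:
 *
 *	sum_i(dl_runtime_i / dl_period_i) <= capacity
 *
 * ->dl_bw caches the runtime/period ratio to make that check cheap. See
 * Documentation/scheduler/sched-deadline.rst for the authoritative model.
 */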

#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value
 * assigned, which can be different from the clamp value "requested" from
 * user-space. This allows to know a task is refcounted in the rq's bucket
 * corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace. This allows to relax default clamps when
 * a less restrictive task-specific value has been requested.
 */
struct uclamp_se {
	unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
	unsigned int active : 1;
	unsigned int user_defined : 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_hint;	/* Hint for performance. */
		u8 need_mb;	/* Readers need smp_mb(). */
	} b;			/* Bits. */
	u32 s;			/* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif

	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void *stack;
	refcount_t usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	int on_cpu;
	struct __call_single_node wake_entry;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int cpu;
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * push tasks around a CPU where each wakeup moves to the next one.
	 * Tracking a recently used CPU allows a quick search for a recently
	 * used CPU that may be idle.
	 */
	int recent_used_cpu;
	int wake_cpu;
#endif
	int on_rq;

	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_UCLAMP_TASK
	/*
	 * Clamp values requested for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se uclamp_req[UCLAMP_CNT];
	/*
	 * Effective clamp values used for a scheduling entity.
	 * Must be updated with task_rq_lock() held.
	 */
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	const cpumask_t *cpus_ptr;
	cpumask_t cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	u8 rcu_tasks_holdout;
	u8 rcu_tasks_idx;
	int rcu_tasks_idle_cpu;
	struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_TRACE_RCU
	int trc_reader_nesting;
	int trc_ipi_to_cpu;
	union rcu_special trc_reader_special;
	bool trc_reader_checked;
	struct list_head trc_holdout_list;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm;
	struct mm_struct *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endif
	int exit_state;
	int exit_code;
	int exit_signal;
	/* The signal sent when the parent dies: */
	int pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned :0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* Disallow userland-initiated cgroup migration: */
	unsigned no_cgroup_migration:1;
	/* Task is frozen/stopped (used by the cgroup freezer): */
	unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned use_memdelay:1;
#endif
#ifdef CONFIG_PSI
	/* Stalled due to lack of memory: */
	unsigned in_memstall:1;
#endif

	unsigned long atomic_flags;	/* Flags requiring atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger
	 * sibling, older sibling, respectively. (p->father can be replaced
	 * with p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu *real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu *parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid *thread_pid;
	struct hlist_node pid_links[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user *set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled;
	u64 stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long nvcsw;
	unsigned long nivcsw;

	/* Monotonic time in nsecs: */
	u64 start_time;

	/* Boot based time in nsecs: */
	u64 start_boottime;

	/* MM fault and swap info: this can arguably be seen as either
	 * mm-specific or thread-specific: */
	unsigned long min_flt;
	unsigned long maj_flt;

	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
	struct posix_cputimers posix_cputimers;

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
	struct posix_cputimers_work posix_cputimers_work;
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu *ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu *real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu *cred;

#ifdef CONFIG_KEYS
	/* Cached requested key. */
	struct key *cached_requested_key;
#endif

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
	unsigned long last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct *fs;

	/* Open file information: */
	struct files_struct *files;

	/* Namespaces: */
	struct nsproxy *nsproxy;

	/* Signal handlers: */
	struct signal_struct *signal;
	struct sighand_struct __rcu *sighand;
	sigset_t blocked;
	sigset_t real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context *audit_context;
#endif
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking: */
	u64 parent_exec_id;
	u64 self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings,
	 * mems_allowed, mempolicy: */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct *pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events irqtrace;
	unsigned int hardirq_threaded;
	u64 hardirq_chain_key;
	int softirqs_enabled;
	int softirq_context;
	int irq_config;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	/* Journalling filesystem info: */
	void *journal_info;

	/* Stacked block device info: */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug *plug;
#endif

	/* VM state: */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control *capture_control;
#endif
	/* Ptrace state: */
	unsigned long ptrace_message;
	kernel_siginfo_t *last_siginfo;

	struct task_io_accounting ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state: */
	unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64 acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64 acct_vm_mem1;
	/* stime + utime since last update: */
	u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_spinlock_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32 closid;
	u32 rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
	struct mutex futex_exit_mutex;
	unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy *mempolicy;
	short il_prev;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	/* Migration stamp: */
	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	/*
	 * This pointer is only modified for current in syscall and
	 * pagefault context (and for tasks being destroyed), so it can be
	 * read from any of the following contexts:
	 *  - RCU read-side critical section
	 *  - current->numa_group from everywhere
	 *  - task's runqueue locked, task not running
	 */
	struct numa_group __rcu *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with
	 * different weights depending on whether they were shared or private
	 * faults.
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_RSEQ
	struct rseq __user *rseq;
	u32 rseq_sig;
	/*
	 * RmW on rseq_event_mask must be performed atomically
	 * with respect to preemption.
	 */
	unsigned long rseq_event_mask;
#endif

	struct tlbflush_unmap_batch tlb_ubc;

	union {
		refcount_t rcu_users;
		struct rcu_head rcu;
	};

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
	unsigned int fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif

#ifdef CONFIG_KCSAN
	struct kcsan_ctx kcsan_ctx;
#ifdef CONFIG_TRACE_IRQFLAGS
	struct irqtrace_events kcsan_save_irqtrace;
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int curr_ret_stack;
	int curr_ret_depth;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack *ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t trace_overrun;

	/* Pause tracing: */
	atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* See kernel/kcov.c for more details. */

	/* Coverage collection mode enabled for this task (0 if disabled): */
	unsigned int kcov_mode;

	/* Size of the kcov_area: */
	unsigned int kcov_size;

	/* Buffer for coverage collection: */
	void *kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov *kcov;

	/* KCOV common handle for remote coverage collection: */
	u64 kcov_handle;

	/* KCOV sequence number: */
	int kcov_sequence;

	/* Collecting coverage from softirq context: */
	unsigned int kcov_softirq;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int memcg_nr_pages_over_high;

	/* Used by memcontrol for targeted memcg charge: */
	struct mem_cgroup *active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct request_queue *throttle_queue;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void *security;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long lowest_stack;
	unsigned long prev_lowest_stack;
#endif

#ifdef CONFIG_X86_MCE
	u64 mce_addr;
	__u64 mce_ripv : 1,
	      mce_whole_page : 1,
	      __mce_reserved : 62;
	struct callback_head mce_kill_me;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure. It must be at the *end* of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->thread_pid;
}

/*
 * The helpers to get the task's different pids as they are seen
 * from various namespaces:
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * See also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
	return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
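
/*
 * Illustrative mapping (derived from the definitions above): the index
 * returned by task_state_index() selects a character from "RSDTtXZPI":
 *
 *	TASK_RUNNING          -> 'R'	TASK_INTERRUPTIBLE -> 'S'
 *	TASK_UNINTERRUPTIBLE  -> 'D'	__TASK_STOPPED     -> 'T'
 *	__TASK_TRACED         -> 't'	EXIT_DEAD          -> 'X'
 *	EXIT_ZOMBIE           -> 'Z'	TASK_PARKED        -> 'P'
 *	TASK_IDLE             -> 'I'
 *
 * matching the state letters shown by ps(1) and /proc/<pid>/stat.
 */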

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080	/* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
#define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
						 * I am cleaning dirty pages from some other bdi. */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will clear __GFP_MOVABLE */
#define PF_IO_WORKER		0x20000000	/* Task is an IO worker */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000	/* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math()			clear_stopped_child_used_math(current)
#define set_used_math()				set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
#define used_math()				tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled */
#define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}
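
/*
 * Usage sketch (illustrative): the save/restore pattern for temporarily
 * setting a PF_* flag, here PF_MEMALLOC:
 *
 *	unsigned long pflags = current->flags & PF_MEMALLOC;
 *
 *	current->flags |= PF_MEMALLOC;
 *	do_reclaim_work();			// hypothetical callee
 *	current_restore_flags(pflags, PF_MEMALLOC);
 *
 * Only the saved bit is restored; all other flags are left untouched.
 */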

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	/* On UP only CPU 0 can ever be in the mask: */
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern void sched_set_fifo(struct task_struct *p);
extern void sched_set_fifo_low(struct task_struct *p);
extern void sched_set_normal(struct task_struct *p, int nice);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task)	((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * Must be called under rcu_read_lock().
 */
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
	__get_task_comm(buf, sizeof(buf), tsk);		\
})
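
/*
 * Usage sketch (illustrative): the buffer must be a real array of exactly
 * TASK_COMM_LEN bytes, or the BUILD_BUG_ON() above fires at compile time:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("task name: %s\n", comm);
 */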

#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */
#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})
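
/*
 * Usage sketch (illustrative): long kernel-side loops drop a cond_resched()
 * into each iteration so that !CONFIG_PREEMPTION kernels keep reasonable
 * scheduling latency:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);		// hypothetical callee
 *		cond_resched();
 *	}
 */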

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif
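
/*
 * Usage sketch (illustrative): optimistic spinners typically stop spinning
 * once the lock owner's vCPU has been preempted, since busy-waiting for a
 * descheduled vCPU only burns cycles:
 *
 *	while (!try_acquire(lock)) {		// hypothetical helper
 *		if (vcpu_is_preempted(task_cpu(owner)))
 *			break;			// block instead of spinning
 *		cpu_relax();
 *	}
 */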

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_RSEQ

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Only applies when forking a process, not a thread.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);
int sched_trace_rq_nr_running(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#endif /* _LINUX_SCHED_H */