/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * ABI (CONFIG_SMP) interfaces:
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>

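/* task_struct member predeclarations (sorted alphabetically): */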
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

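/*
 * Task state bitmask.  NOTE!  These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting.  Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */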
#define TASK_RUNNING 0x0000
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
#define __TASK_STOPPED 0x0004
#define __TASK_TRACED 0x0008

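/* Used in tsk->exit_state: */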
#define EXIT_DEAD 0x0010
#define EXIT_ZOMBIE 0x0020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)

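/* Used in tsk->state again: */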
#define TASK_PARKED 0x0040
#define TASK_DEAD 0x0080
#define TASK_WAKEKILL 0x0100
#define TASK_WAKING 0x0200
#define TASK_NOLOAD 0x0400
#define TASK_NEW 0x0800
#define TASK_STATE_MAX 0x1000

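/* Convenience macros for the sake of set_current_state(): */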
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

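/* Convenience macros for the sake of wake_up(): */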
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

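/* get_task_state(): */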
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
		     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
		     __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
		     TASK_PARKED)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					(task->flags & PF_FROZEN) == 0 && \
					(task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
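/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */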
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
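/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state() executes a full memory barrier before accessing the
 * task state.  Also see the comments of try_to_wake_up().
 */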
#define __set_current_state(state_value)	\
	current->state = (state_value)

#define set_current_state(state_value)		\
	smp_store_mb(current->state, (state_value))

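/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */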
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif

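/* Task command name length: */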
#define TASK_COMM_LEN 16

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

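/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */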
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

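/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups.  Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */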
struct task_cputime {
	u64 utime;
	u64 stime;
	unsigned long long sum_exec_runtime;
};

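/* Alternate field names when used on cache expirations: */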
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t seqcount;
	unsigned long long starttime;
	enum vtime_state state;
	u64 utime;
	u64 stime;
	u64 gtime;
};

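/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */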
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long last_arrival;

	/* When were we last queued to run? */
	unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

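/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */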
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations: */
# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

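/**
 * struct util_est - Estimation utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation, i.e. each time it goes
 * back to sleeping after an activation.
 */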
struct util_est {
	unsigned int enqueued;
	unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
} __attribute__((__aligned__(sizeof(u64))));

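/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg_se() in kernel/sched/pelt.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU.
 *
 * The *_sum fields are the accumulated parts of the corresponding
 * geometric series, decayed per ~1ms period with a factor y such that
 * y^32 == 0.5, i.e. past contributions halve roughly every 32ms.
 */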
struct sched_avg {
	u64 last_update_time;
	u64 load_sum;
	u64 runnable_load_sum;
	u32 util_sum;
	u32 period_contrib;
	unsigned long load_avg;
	unsigned long runnable_load_avg;
	unsigned long util_avg;
	struct util_est util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight load;
	unsigned long runnable_weight;
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq *cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq *rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq *my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node rb_node;
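
	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */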
	u64 dl_runtime;		/* Maximum runtime for each instance	*/
	u64 dl_deadline;	/* Relative deadline of each instance	*/
	u64 dl_period;		/* Separation of two instances (period) */
	u64 dl_bw;		/* dl_runtime / dl_period		*/
	u64 dl_density;		/* dl_runtime / dl_deadline		*/
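
	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */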
	s64 runtime;		/* Remaining runtime for this instance	*/
	u64 deadline;		/* Absolute deadline for this instance	*/
	unsigned int flags;	/* Specifying the scheduler behaviour	*/
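
	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance.
	 * If so we are outside bandwidth enforcement mechanism (but only
	 * until we exit the critical section).
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */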
	unsigned int dl_throttled      : 1;
	unsigned int dl_boosted        : 1;
	unsigned int dl_yielded        : 1;
	unsigned int dl_non_contending : 1;
	unsigned int dl_overrun        : 1;
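
	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */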
	struct hrtimer dl_timer;
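
	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */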
	struct hrtimer inactive_timer;
};

#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias): */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
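
/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value assigned,
 * which can be different from the clamp value "requested" from user-space.
 * This allows to know a task is refcounted in the rq's bucket corresponding
 * to the "effective" bucket_id.
 */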
struct uclamp_se {
	unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
	unsigned int active : 1;
	unsigned int user_defined : 1;
};
#endif /* CONFIG_UCLAMP_TASK */

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_hint;
		u8 deferred_qs;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info thread_info;
#endif

	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void *stack;
	refcount_t usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int cpu;
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	/*
	 * recent_used_cpu is initially set as the last CPU used by a task
	 * that wakes affine another task. Waker/wakee relationships can
	 * induce an over-reliance on the wake_affine path unless this is
	 * tracked and some measure of last use is applied.
	 */
	int recent_used_cpu;
	int wake_cpu;
#endif
	int on_rq;

	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_UCLAMP_TASK
	/* Clamp values requested for a scheduling entity: */
	struct uclamp_se uclamp_req[UCLAMP_CNT];
	/* Effective clamp values used for a scheduling entity: */
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	const cpumask_t *cpus_ptr;
	cpumask_t cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	u8 rcu_tasks_holdout;
	u8 rcu_tasks_idx;
	int rcu_tasks_idle_cpu;
	struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm;
	struct mm_struct *active_mm;

	/* Per-thread vma caching: */
	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endif
	int exit_state;
	int exit_code;
	int exit_signal;
	/* The signal sent when the parent dies: */
	int pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned sched_psi_wake_requeue:1;
#endif

	/* Force alignment to the next boundary: */
	unsigned :0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* Disallow userland-initiated cgroup migration: */
	unsigned no_cgroup_migration:1;
	/* Task is frozen/stopped (used by the cgroup freezer): */
	unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	/* To be used once the psi infrastructure lands upstream: */
	unsigned use_memdelay:1;
#endif

	unsigned long atomic_flags; /* Flags requiring atomic access. */

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child,
	 * younger sibling, older sibling, respectively.
	 * (p->father can be replaced with p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu *real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu *parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage: */
	struct pid *thread_pid;
	struct hlist_node pid_links[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user *set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled;
	u64 stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long nvcsw;
	unsigned long nivcsw;

	/* Monotonic time in nsecs: */
	u64 start_time;

	/* Boot based time in nsecs: */
	u64 real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long min_flt;
	unsigned long maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];
#endif

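	/* Process credentials: */

	/* Tracer's credentials at attach: */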
	const struct cred __rcu *ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu *real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu *cred;

#ifdef CONFIG_KEYS
	/* Cached requested key: */
	struct key *cached_requested_key;
#endif

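	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */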
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
	unsigned long last_switch_time;
#endif
	/* Filesystem information: */
	struct fs_struct *fs;

	/* Open file information: */
	struct files_struct *files;

	/* Namespaces: */
	struct nsproxy *nsproxy;

	/* Signal handlers: */
	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked;
	sigset_t real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context *audit_context;
#endif
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	/* Thread group tracking: */
	u32 parent_exec_id;
	u32 self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct *pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	/* Journalling filesystem info: */
	void *journal_info;

	/* Stacked block device info: */
	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug *plug;
#endif

	/* VM state: */
	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control *capture_control;
#endif
	/* Ptrace state: */
	unsigned long ptrace_message;
	kernel_siginfo_t *last_siginfo;

	struct task_io_accounting ioac;
#ifdef CONFIG_PSI
	/* Pressure stall state: */
	unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64 acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64 acct_vm_mem1;
	/* stime + utime since last update: */
	u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu *cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32 closid;
	u32 rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	/* Protected by alloc_lock: */
	struct mempolicy *mempolicy;
	short il_prev;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	/* Migration stamp: */
	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	/*
	 * This pointer is only modified for current in syscall and
	 * pagefault context (and for tasks being destroyed), so it can be read
	 * from any of the following contexts:
	 *  - RCU read-side critical section
	 *  - current->numa_group from everywhere
	 *  - task's runqueue locked, task not running
	 */
	struct numa_group __rcu *numa_group;

	/*
	 * numa_faults is an array split into four regions:
	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
	 * in this precise order.
	 *
	 * faults_memory: Exponential decaying average of faults on a per-node
	 * basis. Scheduling placement decisions are made based on these
	 * counts. The values remain static for the duration of a PTE scan.
	 * faults_cpu: Track the nodes the process was running on when a NUMA
	 * hinting fault was incurred.
	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
	 * during the current scan window. When the scan completes, the counts
	 * in faults_memory and faults_cpu decay and these values are copied.
	 */
	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	/*
	 * numa_faults_locality tracks if faults recorded during the last
	 * scan window were remote/local or failed to migrate. The task scan
	 * period is adapted based on the locality of the faults with different
	 * weights depending on whether they were shared or private faults.
	 */
	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_RSEQ
	struct rseq __user *rseq;
	u32 rseq_sig;
	/*
	 * RmW on rseq_event_mask must be performed atomically
	 * with respect to preemption.
	 */
	unsigned long rseq_event_mask;
#endif

	struct tlbflush_unmap_batch tlb_ubc;

	struct rcu_head rcu;

	/* Cache last used pipe for splice(): */
	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
	unsigned int fail_nth;
#endif
	/*
	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
	 * balance_dirty_pages() for a dirty throttling pause:
	 */
	int nr_dirtied;
	int nr_dirtied_pause;
	/* Start of a write-and-pause period: */
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif
	/*
	 * Time slack values; these are used to round up poll() and
	 * select() etc timeout values. These are in nanoseconds.
	 */
	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Index of current stored address in ret_stack: */
	int curr_ret_stack;
	int curr_ret_depth;

	/* Stack of return addresses for return function tracing: */
	struct ftrace_ret_stack *ret_stack;

	/* Timestamp for last schedule: */
	unsigned long long ftrace_timestamp;

	/*
	 * Number of functions that haven't been traced
	 * because of depth overrun:
	 */
	atomic_t trace_overrun;

	/* Pause tracing: */
	atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	/* State flags for use by tracers: */
	unsigned long trace;

	/* Bitmask and counter of trace recursion: */
	unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
	/* Coverage collection mode enabled for this task (0 if disabled): */
	unsigned int kcov_mode;

	/* Size of the kcov_area: */
	unsigned int kcov_size;

	/* Buffer for coverage collection: */
	void *kcov_area;

	/* KCOV descriptor wired with this task or NULL: */
	struct kcov *kcov;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	/* Number of pages to reclaim on returning to userland: */
	unsigned int memcg_nr_pages_over_high;

	/* Used by memcontrol for targeted memcg charge: */
	struct mem_cgroup *active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct request_queue *throttle_queue;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* A live task holds one reference: */
	refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	/* Used by LSM modules for access restriction: */
	void *security;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long lowest_stack;
	unsigned long prev_lowest_stack;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
	 */
	randomized_struct_fields_end

	/* CPU-specific state of this task: */
	struct thread_struct thread;

	/*
	 * WARNING: on x86, 'thread_struct' contains a variable-sized
	 * structure.  It *MUST* be at the end of 'task_struct'.
	 *
	 * Do not put anything below here!
	 */
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->thread_pid;
}

/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

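/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */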
static inline int pid_alive(const struct task_struct *p)
{
	return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE			0x00000002	/* I am an IDLE thread */
#define PF_EXITING		0x00000004	/* Getting shut down */
#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
#define PF_MCE_PROCESS		0x00000080	/* Process policy on mce errors */
#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
#define PF_DUMPCORE		0x00000200	/* Dumped core */
#define PF_SIGNALED		0x00000400	/* Killed by a signal */
#define PF_MEMALLOC		0x00000800	/* Allocating memory */
#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
#define PF_KSWAPD		0x00020000	/* I am kswapd */
#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
#define PF_MEMSTALL		0x01000000	/* Stalled due to lack of memory */
#define PF_UMH			0x02000000	/* I'm an usermodehelper process */
#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will clear __GFP_MOVABLE (no CMA) */
#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK		0x80000000	/* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled */
#define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({			\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
	__get_task_comm(buf, sizeof(buf), tsk);		\
})

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#ifdef CONFIG_RSEQ

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Only applies when forking a process, not a thread.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_THREAD) {
		t->rseq = NULL;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

void __exit_umh(struct task_struct *tsk);

static inline void exit_umh(struct task_struct *tsk)
{
	if (unlikely(tsk->flags & PF_UMH))
		__exit_umh(tsk);
}

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#endif /* _LINUX_SCHED_H */