#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>

struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

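/* Used in tsk->state: */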
#define TASK_RUNNING 0x0000
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
#define __TASK_STOPPED 0x0004
#define __TASK_TRACED 0x0008

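/* Used in tsk->exit_state: */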
#define EXIT_DEAD 0x0010
#define EXIT_ZOMBIE 0x0020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)

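/* Used in tsk->state again: */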
#define TASK_PARKED 0x0040
#define TASK_DEAD 0x0080
#define TASK_WAKEKILL 0x0100
#define TASK_WAKING 0x0200
#define TASK_NOLOAD 0x0400
#define TASK_NEW 0x0800
#define TASK_STATE_MAX 0x1000

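/* Convenience macros for the sake of set_current_state(): */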
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

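/* Convenience macros for the sake of wake_up(): */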
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

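/* get_task_state(): */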
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
			 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
			 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
			 TASK_PARKED)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					(task->flags & PF_FROZEN) == 0 && \
					(task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

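/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */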
#define is_special_task_state(state) \
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value) \
	do { \
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_; \
		current->state = (state_value); \
	} while (0)

#define set_current_state(state_value) \
	do { \
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_; \
		smp_store_mb(current->state, (state_value)); \
	} while (0)

#define set_special_state(state_value) \
	do { \
		unsigned long flags; \
		WARN_ON_ONCE(!is_special_task_state(state_value)); \
		raw_spin_lock_irqsave(&current->pi_lock, flags); \
		current->task_state_change = _THIS_IP_; \
		current->state = (state_value); \
		raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
	} while (0)
#else

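/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (CONDITION)
 *	   break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * CONDITION test and condition change and wakeup are under the same lock),
 * then use __set_current_state().
 */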
#define __set_current_state(state_value) \
	current->state = (state_value)

#define set_current_state(state_value) \
	smp_store_mb(current->state, (state_value))

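/*
 * set_special_state() should be used for those states when the blocking task
 * can not use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */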
#define set_special_state(state_value) \
	do { \
		unsigned long flags; \
		raw_spin_lock_irqsave(&current->pi_lock, flags); \
		current->state = (state_value); \
		raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
	} while (0)

#endif

#define TASK_COMM_LEN 16

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

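/**
 * struct prev_cputime - snapshotted raw cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */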
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64 utime;
	u64 stime;
	raw_spinlock_t lock;
#endif
};

enum vtime_state {
	VTIME_INACTIVE = 0,
	VTIME_IDLE,
	VTIME_SYS,
	VTIME_USER,
	VTIME_GUEST,
};

struct vtime {
	seqcount_t seqcount;
	unsigned long long starttime;
	enum vtime_state state;
	unsigned int cpu;
	u64 utime;
	u64 stime;
	u64 gtime;
};

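/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */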
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO

	unsigned long pcount;

	unsigned long long run_delay;

	unsigned long long last_arrival;

	unsigned long long last_queued;

#endif
};

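/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */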
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

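/*
 * struct util_est - estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA) utilization
 *            of a task
 *
 * Tracks an EWMA of a FAIR task's utilization; new samples are added each
 * time a task completes an activation, with a weight chosen so the average
 * is relatively insensitive to transient changes in the task's workload.
 */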
struct util_est {
	unsigned int enqueued;
	unsigned int ewma;
#define UTIL_EST_WEIGHT_SHIFT 2
} __attribute__((__aligned__(sizeof(u64))));

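/*
 * The load/runnable_load/util_avg fields accumulate an infinite geometric
 * series (see __update_load_avg_*() in kernel/sched/pelt.c):
 *
 *   load_avg = runnable% * scale_load_down(load)
 *   util_avg = running%  * SCHED_CAPACITY_SCALE
 *
 * where runnable% is the time ratio that a sched_entity is runnable and
 * running% the time ratio that a sched_entity is running on a CPU.
 */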
struct sched_avg {
	u64 last_update_time;
	u64 load_sum;
	u64 runnable_load_sum;
	u32 util_sum;
	u32 period_contrib;
	unsigned long load_avg;
	unsigned long runnable_load_avg;
	unsigned long util_avg;
	struct util_est util_est;
} ____cacheline_aligned;

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64 wait_start;
	u64 wait_max;
	u64 wait_count;
	u64 wait_sum;
	u64 iowait_count;
	u64 iowait_sum;

	u64 sleep_start;
	u64 sleep_max;
	s64 sum_sleep_runtime;

	u64 block_start;
	u64 block_max;
	u64 exec_max;
	u64 slice_max;

	u64 nr_migrations_cold;
	u64 nr_failed_migrations_affine;
	u64 nr_failed_migrations_running;
	u64 nr_failed_migrations_hot;
	u64 nr_forced_migrations;

	u64 nr_wakeups;
	u64 nr_wakeups_sync;
	u64 nr_wakeups_migrate;
	u64 nr_wakeups_local;
	u64 nr_wakeups_remote;
	u64 nr_wakeups_affine;
	u64 nr_wakeups_affine_attempts;
	u64 nr_wakeups_passive;
	u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
	struct load_weight load;
	unsigned long runnable_weight;
	struct rb_node run_node;
	struct list_head group_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

	u64 nr_migrations;

	struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int depth;
	struct sched_entity *parent;
	struct cfs_rq *cfs_rq;
	struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
	struct sched_avg avg;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity *parent;
	struct rt_rq *rt_rq;
	struct rt_rq *my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node rb_node;

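	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */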
	u64 dl_runtime;
	u64 dl_deadline;
	u64 dl_period;
	u64 dl_bw;
	u64 dl_density;

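	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */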
	s64 runtime;
	u64 deadline;
	unsigned int flags;

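	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to deadline inheritance.
	 * If so we are outside bandwidth enforcement (but only until we
	 * exit the critical section).
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization; i.e. the inactive timer
	 * has been armed and its handler has not run yet.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */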
	unsigned int dl_throttled : 1;
	unsigned int dl_boosted : 1;
	unsigned int dl_yielded : 1;
	unsigned int dl_non_contending : 1;
	unsigned int dl_overrun : 1;

	struct hrtimer dl_timer;

	struct hrtimer inactive_timer;
};

#ifdef CONFIG_UCLAMP_TASK

#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

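/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value,
 * pre-computed and stored to avoid expensive integer divisions from the
 * fast path.
 */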
struct uclamp_se {
	unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
	unsigned int active : 1;
	unsigned int user_defined : 1;
};
#endif

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_hint;
		u8 deferred_qs;
	} b;
	u32 s;
};

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK

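	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */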
	struct thread_info thread_info;
#endif

	volatile long state;

	randomized_struct_fields_start

	void *stack;
	refcount_t usage;

	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	unsigned int cpu;
#endif
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int recent_used_cpu;
	int wake_cpu;
#endif
	int on_rq;

	int prio;
	int static_prio;
	int normal_prio;
	unsigned int rt_priority;

	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_UCLAMP_TASK
	struct uclamp_se uclamp_req[UCLAMP_CNT];
	struct uclamp_se uclamp[UCLAMP_CNT];
#endif

#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	const cpumask_t *cpus_ptr;
	cpumask_t cpus_mask;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif

#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	u8 rcu_tasks_holdout;
	u8 rcu_tasks_idx;
	int rcu_tasks_idle_cpu;
	struct list_head rcu_tasks_holdout_list;
#endif

	struct sched_info sched_info;

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm;
	struct mm_struct *active_mm;

	struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat rss_stat;
#endif
	int exit_state;
	int exit_code;
	int exit_signal;

	int pdeath_signal;

	unsigned long jobctl;

	unsigned int personality;

	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned sched_remote_wakeup:1;
#ifdef CONFIG_PSI
	unsigned sched_psi_wake_requeue:1;
#endif

	unsigned :0;

	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned in_user_fault:1;
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	unsigned no_cgroup_migration:1;
	unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
	unsigned use_memdelay:1;
#endif

	unsigned long atomic_flags;

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_STACKPROTECTOR
	unsigned long stack_canary;
#endif

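	/* Real parent process: */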
	struct task_struct __rcu *real_parent;

	struct task_struct __rcu *parent;

	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	struct list_head ptraced;
	struct list_head ptrace_entry;

	struct pid *thread_pid;
	struct hlist_node pid_links[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;

	int __user *set_child_tid;

	int __user *clear_child_tid;

	u64 utime;
	u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64 utimescaled;
	u64 stimescaled;
#endif
	u64 gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	unsigned long nvcsw;
	unsigned long nivcsw;

	u64 start_time;

	u64 start_boottime;

	unsigned long min_flt;
	unsigned long maj_flt;

	struct posix_cputimers posix_cputimers;

	const struct cred __rcu *ptracer_cred;

	const struct cred __rcu *real_cred;

	const struct cred __rcu *cred;

#ifdef CONFIG_KEYS
	struct key *cached_requested_key;
#endif

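	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */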
	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
	unsigned long last_switch_time;
#endif

	struct fs_struct *fs;

	struct files_struct *files;

	struct nsproxy *nsproxy;

	struct signal_struct *signal;
	struct sighand_struct *sighand;
	sigset_t blocked;
	sigset_t real_blocked;

	sigset_t saved_sigmask;
	struct sigpending pending;
	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	unsigned int sas_ss_flags;

	struct callback_head *task_works;

#ifdef CONFIG_AUDIT
#ifdef CONFIG_AUDITSYSCALL
	struct audit_context *audit_context;
#endif
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	u32 parent_exec_id;
	u32 self_exec_id;

	spinlock_t alloc_lock;

	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	struct rb_root_cached pi_waiters;
	struct task_struct *pi_top_task;
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	int non_block_count;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	void *journal_info;

	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	struct blk_plug *plug;
#endif

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

#ifdef CONFIG_COMPACTION
	struct capture_control *capture_control;
#endif

	unsigned long ptrace_message;
	kernel_siginfo_t *last_siginfo;

	struct task_io_accounting ioac;
#ifdef CONFIG_PSI
	unsigned int psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
	u64 acct_rss_mem1;
	u64 acct_vm_mem1;
	u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	struct css_set __rcu *cgroups;
	struct list_head cg_list;
#endif
#ifdef CONFIG_X86_CPU_RESCTRL
	u32 closid;
	u32 rmid;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
	struct mutex futex_exit_mutex;
	unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_prev;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;

	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct numa_group __rcu *numa_group;

	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif

#ifdef CONFIG_RSEQ
	struct rseq __user *rseq;
	u32 rseq_sig;
	unsigned long rseq_event_mask;
#endif

	struct tlbflush_unmap_batch tlb_ubc;

	union {
		refcount_t rcu_users;
		struct rcu_head rcu;
	};

	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
	unsigned int fail_nth;
#endif

	int nr_dirtied;
	int nr_dirtied_pause;

	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif

	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_ret_stack;
	int curr_ret_depth;

	struct ftrace_ret_stack *ret_stack;

	unsigned long long ftrace_timestamp;

	atomic_t trace_overrun;

	atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
	unsigned long trace;

	unsigned long trace_recursion;
#endif

#ifdef CONFIG_KCOV
	unsigned int kcov_mode;

	unsigned int kcov_size;

	void *kcov_area;

	struct kcov *kcov;

	u64 kcov_handle;

	int kcov_sequence;
#endif

#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	unsigned int memcg_nr_pages_over_high;

	struct mem_cgroup *active_memcg;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct request_queue *throttle_queue;
#endif

#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int sequential_io;
	unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
	struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
	refcount_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
	int patch_state;
#endif
#ifdef CONFIG_SECURITY
	void *security;
#endif

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	unsigned long lowest_stack;
	unsigned long prev_lowest_stack;
#endif

	randomized_struct_fields_end

	struct thread_struct thread;
};

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->thread_pid;
}

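/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
 *                     current.
 * task_xid_nr_ns()  : id seen from the ns specified;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */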
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

static inline int pid_alive(const struct task_struct *p)
{
	return p->thread_pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)

static inline unsigned int task_state_index(struct task_struct *tsk)
{
	unsigned int tsk_state = READ_ONCE(tsk->state);
	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

	if (tsk_state == TASK_IDLE)
		state = TASK_REPORT_IDLE;

	return fls(state);
}

static inline char task_index_to_char(unsigned int state)
{
	static const char state_char[] = "RSDTtXZPI";

	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

	return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}

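/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have sub-threads we need to check tgid.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */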
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

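/*
 * Per process flags
 */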
#define PF_IDLE 0x00000002
#define PF_EXITING 0x00000004
#define PF_VCPU 0x00000010
#define PF_WQ_WORKER 0x00000020
#define PF_FORKNOEXEC 0x00000040
#define PF_MCE_PROCESS 0x00000080
#define PF_SUPERPRIV 0x00000100
#define PF_DUMPCORE 0x00000200
#define PF_SIGNALED 0x00000400
#define PF_MEMALLOC 0x00000800
#define PF_NPROC_EXCEEDED 0x00001000
#define PF_USED_MATH 0x00002000
#define PF_USED_ASYNC 0x00004000
#define PF_NOFREEZE 0x00008000
#define PF_FROZEN 0x00010000
#define PF_KSWAPD 0x00020000
#define PF_MEMALLOC_NOFS 0x00040000
#define PF_MEMALLOC_NOIO 0x00080000
#define PF_LESS_THROTTLE 0x00100000
#define PF_KTHREAD 0x00200000
#define PF_RANDOMIZE 0x00400000
#define PF_SWAPWRITE 0x00800000
#define PF_MEMSTALL 0x01000000
#define PF_UMH 0x02000000
#define PF_NO_SETAFFINITY 0x04000000
#define PF_MCE_EARLY 0x08000000
#define PF_MEMALLOC_NOCMA 0x10000000
#define PF_IO_WORKER 0x20000000
#define PF_FREEZER_SKIP 0x40000000
#define PF_SUSPEND_TASK 0x80000000

#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
	       (current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}

#define PFA_NO_NEW_PRIVS 0
#define PFA_SPREAD_PAGE 1
#define PFA_SPREAD_SLAB 2
#define PFA_SPEC_SSB_DISABLE 3
#define PFA_SPEC_SSB_FORCE_DISABLE 4
#define PFA_SPEC_IB_DISABLE 5
#define PFA_SPEC_IB_FORCE_DISABLE 6
#define PFA_SPEC_SSB_NOEXEC 7

#define TASK_PFA_TEST(name, func) \
	static inline bool task_##func(struct task_struct *p) \
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
	static inline void task_set_##func(struct task_struct *p) \
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
	static inline void task_clear_##func(struct task_struct *p) \
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)

static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
	current->flags &= ~flags;
	current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

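/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */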
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int available_idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

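/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */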
static inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef CONFIG_THREAD_INFO_IN_TASK
extern struct thread_info init_thread_info;
#endif

extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
	return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({ \
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
	__get_task_comm(buf, sizeof(buf), tsk); \
})

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	return 1;
}
#endif

static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
					  bool value)
{
	update_ti_thread_flag(task_thread_info(tsk), flag, value);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

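/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling.
 */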
#ifndef CONFIG_PREEMPTION
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({ \
	___might_sleep(__FILE__, __LINE__, 0); \
	_cond_resched(); \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({ \
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock); \
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}

static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

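/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */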
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
	return READ_ONCE(p->cpu);
#else
	return READ_ONCE(task_thread_info(p)->cpu);
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#ifdef CONFIG_RSEQ

enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};

static inline void rseq_set_notify_resume(struct task_struct *t)
{
	if (t->rseq)
		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}

void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);

static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
	if (current->rseq)
		__rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}

static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}

static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}

static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}

#else

static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif

void __exit_umh(struct task_struct *tsk);

static inline void exit_umh(struct task_struct *tsk)
{
	if (unlikely(tsk->flags & PF_UMH))
		__exit_umh(tsk);
}

#ifdef CONFIG_DEBUG_RSEQ

void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif

const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);

const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);

int sched_trace_rq_cpu(struct rq *rq);

const struct cpumask *sched_trace_rd_span(struct root_domain *rd);

#endif