1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/kcov.h>
18#include <linux/mutex.h>
19#include <linux/plist.h>
20#include <linux/hrtimer.h>
21#include <linux/seccomp.h>
22#include <linux/nodemask.h>
23#include <linux/rcupdate.h>
24#include <linux/refcount.h>
25#include <linux/resource.h>
26#include <linux/latencytop.h>
27#include <linux/sched/prio.h>
28#include <linux/sched/types.h>
29#include <linux/signal_types.h>
30#include <linux/mm_types_task.h>
31#include <linux/task_io_accounting.h>
32#include <linux/posix-timers.h>
33#include <linux/rseq.h>
34#include <linux/kcsan.h>
35
36
37struct audit_context;
38struct backing_dev_info;
39struct bio_list;
40struct blk_plug;
41struct capture_control;
42struct cfs_rq;
43struct fs_struct;
44struct futex_pi_state;
45struct io_context;
46struct mempolicy;
47struct nameidata;
48struct nsproxy;
49struct perf_event_context;
50struct pid_namespace;
51struct pipe_inode_info;
52struct rcu_node;
53struct reclaim_state;
54struct robust_list_head;
55struct root_domain;
56struct rq;
57struct sched_attr;
58struct sched_param;
59struct seq_file;
60struct sighand_struct;
61struct signal_struct;
62struct task_delay_info;
63struct task_group;
64
65
66
67
68
69
70
71
72
73
74
75
76
77#define TASK_RUNNING 0x0000
78#define TASK_INTERRUPTIBLE 0x0001
79#define TASK_UNINTERRUPTIBLE 0x0002
80#define __TASK_STOPPED 0x0004
81#define __TASK_TRACED 0x0008
82
83#define EXIT_DEAD 0x0010
84#define EXIT_ZOMBIE 0x0020
85#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
86
87#define TASK_PARKED 0x0040
88#define TASK_DEAD 0x0080
89#define TASK_WAKEKILL 0x0100
90#define TASK_WAKING 0x0200
91#define TASK_NOLOAD 0x0400
92#define TASK_NEW 0x0800
93#define TASK_STATE_MAX 0x1000
94
95
96#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
97#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
98#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
99
100#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
101
102
103#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
104
105
106#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
107 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
108 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
109 TASK_PARKED)
110
111#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
112
113#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
114
115#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
116
117#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
118
119
120
121
122
123#define is_special_task_state(state) \
124 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
125
126#define __set_current_state(state_value) \
127 do { \
128 WARN_ON_ONCE(is_special_task_state(state_value));\
129 current->task_state_change = _THIS_IP_; \
130 current->state = (state_value); \
131 } while (0)
132
133#define set_current_state(state_value) \
134 do { \
135 WARN_ON_ONCE(is_special_task_state(state_value));\
136 current->task_state_change = _THIS_IP_; \
137 smp_store_mb(current->state, (state_value)); \
138 } while (0)
139
140#define set_special_state(state_value) \
141 do { \
142 unsigned long flags; \
143 WARN_ON_ONCE(!is_special_task_state(state_value)); \
 raw_spin_lock_irqsave(&current->pi_lock, flags); \
145 current->task_state_change = _THIS_IP_; \
146 current->state = (state_value); \
 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
148 } while (0)
149#else
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187#define __set_current_state(state_value) \
188 current->state = (state_value)
189
190#define set_current_state(state_value) \
191 smp_store_mb(current->state, (state_value))
192
193
194
195
196
197
198
199#define set_special_state(state_value) \
200 do { \
201 unsigned long flags; \
 raw_spin_lock_irqsave(&current->pi_lock, flags); \
203 current->state = (state_value); \
 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
205 } while (0)
206
207#endif
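
/*
 * Illustrative sketch (not part of this header): the usual way to sleep on a
 * condition is to combine set_current_state() with a re-check of the
 * condition before calling schedule(), so that a wakeup racing with the
 * state change is not lost:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (done_condition)	// "done_condition" is hypothetical
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * set_current_state() issues a full barrier (smp_store_mb() above), which
 * pairs with the ordering provided by wake_up_process() on the waking side.
 */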
208
209
210#define TASK_COMM_LEN 16
211
212extern void scheduler_tick(void);
213
214#define MAX_SCHEDULE_TIMEOUT LONG_MAX
215
216extern long schedule_timeout(long timeout);
217extern long schedule_timeout_interruptible(long timeout);
218extern long schedule_timeout_killable(long timeout);
219extern long schedule_timeout_uninterruptible(long timeout);
220extern long schedule_timeout_idle(long timeout);
221asmlinkage void schedule(void);
222extern void schedule_preempt_disabled(void);
223asmlinkage void preempt_schedule_irq(void);
224
225extern int __must_check io_schedule_prepare(void);
226extern void io_schedule_finish(int token);
227extern long io_schedule_timeout(long timeout);
228extern void io_schedule(void);
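
/*
 * Illustrative sketch: schedule_timeout() expects the caller to set the task
 * state first and returns the number of jiffies remaining (0 on timeout);
 * msecs_to_jiffies() is assumed from <linux/jiffies.h>:
 *
 *	long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	// remaining > 0: woken (or signalled) before the full 100ms elapsed
 *
 * The schedule_timeout_interruptible/killable/uninterruptible/idle variants
 * set the corresponding task state themselves.
 */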
229
230
231
232
233
234
235
236
237
238
239struct prev_cputime {
240#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
241 u64 utime;
242 u64 stime;
243 raw_spinlock_t lock;
244#endif
245};
246
247enum vtime_state {
248
249 VTIME_INACTIVE = 0,
250
251 VTIME_IDLE,
252
253 VTIME_SYS,
254
255 VTIME_USER,
256
257 VTIME_GUEST,
258};
259
260struct vtime {
261 seqcount_t seqcount;
262 unsigned long long starttime;
263 enum vtime_state state;
264 unsigned int cpu;
265 u64 utime;
266 u64 stime;
267 u64 gtime;
268};
269
270
271
272
273
274
275
276enum uclamp_id {
277 UCLAMP_MIN = 0,
278 UCLAMP_MAX,
279 UCLAMP_CNT
280};
281
282#ifdef CONFIG_SMP
283extern struct root_domain def_root_domain;
284extern struct mutex sched_domains_mutex;
285#endif
286
287struct sched_info {
288#ifdef CONFIG_SCHED_INFO
289
290
291
292 unsigned long pcount;
293
294
295 unsigned long long run_delay;
296
297
298
299
300 unsigned long long last_arrival;
301
302
303 unsigned long long last_queued;
304
305#endif
306};
307
308
309
310
311
312
313
314
315# define SCHED_FIXEDPOINT_SHIFT 10
316# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
317
318
319# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
320# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
321
322struct load_weight {
323 unsigned long weight;
324 u32 inv_weight;
325};
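
/*
 * Informal note: inv_weight caches a scaled inverse of weight (roughly
 * 2^32 / weight, see __calc_delta() in kernel/sched/fair.c) so that the
 * frequent "divide by weight" operation can be done as a multiply and shift:
 *
 *	delta_exec * weight / lw->weight
 *	    ~= (delta_exec * weight * lw->inv_weight) >> 32
 */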
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349struct util_est {
350 unsigned int enqueued;
351 unsigned int ewma;
352#define UTIL_EST_WEIGHT_SHIFT 2
353} __attribute__((__aligned__(sizeof(u64))));
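
/*
 * Informal sketch of the update (see the util_est handling in
 * kernel/sched/fair.c): ewma tracks an exponentially weighted moving average
 * of the utilization observed at dequeue time, with weight
 * 1/2^UTIL_EST_WEIGHT_SHIFT = 1/4 for new samples, i.e. approximately:
 *
 *	ewma = ewma - (ewma >> UTIL_EST_WEIGHT_SHIFT)
 *		    + (enqueued >> UTIL_EST_WEIGHT_SHIFT);
 */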
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400struct sched_avg {
401 u64 last_update_time;
402 u64 load_sum;
403 u64 runnable_sum;
404 u32 util_sum;
405 u32 period_contrib;
406 unsigned long load_avg;
407 unsigned long runnable_avg;
408 unsigned long util_avg;
409 struct util_est util_est;
410} ____cacheline_aligned;
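
/*
 * Informal note: these fields implement PELT (per-entity load tracking).
 * Runtime is accumulated in ~1ms (1024us) segments and older contributions
 * decay geometrically so that a contribution halves after 32 segments
 * (y^32 = 0.5). The *_avg members are the *_sum members normalised against
 * the maximum attainable sum; util_avg, for example, ends up in the range
 * [0, SCHED_CAPACITY_SCALE]. See kernel/sched/pelt.c for the actual math.
 */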
411
412struct sched_statistics {
413#ifdef CONFIG_SCHEDSTATS
414 u64 wait_start;
415 u64 wait_max;
416 u64 wait_count;
417 u64 wait_sum;
418 u64 iowait_count;
419 u64 iowait_sum;
420
421 u64 sleep_start;
422 u64 sleep_max;
 s64 sum_sleep_runtime;
424
425 u64 block_start;
426 u64 block_max;
427 u64 exec_max;
428 u64 slice_max;
429
430 u64 nr_migrations_cold;
431 u64 nr_failed_migrations_affine;
432 u64 nr_failed_migrations_running;
433 u64 nr_failed_migrations_hot;
434 u64 nr_forced_migrations;
435
436 u64 nr_wakeups;
437 u64 nr_wakeups_sync;
438 u64 nr_wakeups_migrate;
439 u64 nr_wakeups_local;
440 u64 nr_wakeups_remote;
441 u64 nr_wakeups_affine;
442 u64 nr_wakeups_affine_attempts;
443 u64 nr_wakeups_passive;
444 u64 nr_wakeups_idle;
445#endif
446};
447
448struct sched_entity {
449
450 struct load_weight load;
451 struct rb_node run_node;
452 struct list_head group_node;
453 unsigned int on_rq;
454
455 u64 exec_start;
456 u64 sum_exec_runtime;
457 u64 vruntime;
458 u64 prev_sum_exec_runtime;
459
460 u64 nr_migrations;
461
462 struct sched_statistics statistics;
463
464#ifdef CONFIG_FAIR_GROUP_SCHED
465 int depth;
466 struct sched_entity *parent;
467
468 struct cfs_rq *cfs_rq;
469
470 struct cfs_rq *my_q;
471
472 unsigned long runnable_weight;
473#endif
474
475#ifdef CONFIG_SMP
476
477
478
479
480
481
482 struct sched_avg avg;
483#endif
484};
485
486struct sched_rt_entity {
487 struct list_head run_list;
488 unsigned long timeout;
489 unsigned long watchdog_stamp;
490 unsigned int time_slice;
491 unsigned short on_rq;
492 unsigned short on_list;
493
494 struct sched_rt_entity *back;
495#ifdef CONFIG_RT_GROUP_SCHED
496 struct sched_rt_entity *parent;
497
498 struct rt_rq *rt_rq;
499
500 struct rt_rq *my_q;
501#endif
502} __randomize_layout;
503
504struct sched_dl_entity {
505 struct rb_node rb_node;
506
507
508
509
510
511
512 u64 dl_runtime;
513 u64 dl_deadline;
514 u64 dl_period;
515 u64 dl_bw;
516 u64 dl_density;
517
518
519
520
521
522
523 s64 runtime;
524 u64 deadline;
525 unsigned int flags;
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551 unsigned int dl_throttled : 1;
552 unsigned int dl_boosted : 1;
553 unsigned int dl_yielded : 1;
554 unsigned int dl_non_contending : 1;
555 unsigned int dl_overrun : 1;
556
557
558
559
560
561 struct hrtimer dl_timer;
562
563
564
565
566
567
568
569
570 struct hrtimer inactive_timer;
571};
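
/*
 * Illustrative sketch (in-kernel caller): a task is switched to
 * SCHED_DEADLINE through sched_setattr(), declared later in this header.
 * struct sched_attr comes from <uapi/linux/sched/types.h>; the times are in
 * nanoseconds and must satisfy runtime <= deadline <= period. NSEC_PER_MSEC
 * is assumed from <linux/time64.h>, and "p" is a hypothetical task pointer:
 *
 *	struct sched_attr attr = {
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * NSEC_PER_MSEC,
 *		.sched_deadline	=  30 * NSEC_PER_MSEC,
 *		.sched_period	= 100 * NSEC_PER_MSEC,
 *	};
 *	int ret = sched_setattr(p, &attr);
 */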
572
573#ifdef CONFIG_UCLAMP_TASK
574
575#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600struct uclamp_se {
601 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
602 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
603 unsigned int active : 1;
604 unsigned int user_defined : 1;
605};
606#endif
607
608union rcu_special {
609 struct {
610 u8 blocked;
611 u8 need_qs;
612 u8 exp_hint;
613 u8 need_mb;
614 } b;
615 u32 s;
616};
617
618enum perf_event_task_context {
619 perf_invalid_context = -1,
620 perf_hw_context = 0,
621 perf_sw_context,
622 perf_nr_task_contexts,
623};
624
625struct wake_q_node {
626 struct wake_q_node *next;
627};
628
629struct task_struct {
630#ifdef CONFIG_THREAD_INFO_IN_TASK
631
632
633
634
635 struct thread_info thread_info;
636#endif
637
638 volatile long state;
639
640
641
642
643
644 randomized_struct_fields_start
645
646 void *stack;
647 refcount_t usage;
648
649 unsigned int flags;
650 unsigned int ptrace;
651
652#ifdef CONFIG_SMP
653 int on_cpu;
654 struct __call_single_node wake_entry;
655#ifdef CONFIG_THREAD_INFO_IN_TASK
656
657 unsigned int cpu;
658#endif
659 unsigned int wakee_flips;
660 unsigned long wakee_flip_decay_ts;
661 struct task_struct *last_wakee;
662
663
664
665
666
667
668
669
670 int recent_used_cpu;
671 int wake_cpu;
672#endif
673 int on_rq;
674
675 int prio;
676 int static_prio;
677 int normal_prio;
678 unsigned int rt_priority;
679
680 const struct sched_class *sched_class;
681 struct sched_entity se;
682 struct sched_rt_entity rt;
683#ifdef CONFIG_CGROUP_SCHED
684 struct task_group *sched_task_group;
685#endif
686 struct sched_dl_entity dl;
687
688#ifdef CONFIG_UCLAMP_TASK
689
690 struct uclamp_se uclamp_req[UCLAMP_CNT];
691
692 struct uclamp_se uclamp[UCLAMP_CNT];
693#endif
694
695#ifdef CONFIG_PREEMPT_NOTIFIERS
696
697 struct hlist_head preempt_notifiers;
698#endif
699
700#ifdef CONFIG_BLK_DEV_IO_TRACE
701 unsigned int btrace_seq;
702#endif
703
704 unsigned int policy;
705 int nr_cpus_allowed;
706 const cpumask_t *cpus_ptr;
707 cpumask_t cpus_mask;
708
709#ifdef CONFIG_PREEMPT_RCU
710 int rcu_read_lock_nesting;
711 union rcu_special rcu_read_unlock_special;
712 struct list_head rcu_node_entry;
713 struct rcu_node *rcu_blocked_node;
714#endif
715
716#ifdef CONFIG_TASKS_RCU
717 unsigned long rcu_tasks_nvcsw;
718 u8 rcu_tasks_holdout;
719 u8 rcu_tasks_idx;
720 int rcu_tasks_idle_cpu;
721 struct list_head rcu_tasks_holdout_list;
722#endif
723
724#ifdef CONFIG_TASKS_TRACE_RCU
725 int trc_reader_nesting;
726 int trc_ipi_to_cpu;
727 union rcu_special trc_reader_special;
728 bool trc_reader_checked;
729 struct list_head trc_holdout_list;
730#endif
731
732 struct sched_info sched_info;
733
734 struct list_head tasks;
735#ifdef CONFIG_SMP
736 struct plist_node pushable_tasks;
737 struct rb_node pushable_dl_tasks;
738#endif
739
740 struct mm_struct *mm;
741 struct mm_struct *active_mm;
742
743
744 struct vmacache vmacache;
745
746#ifdef SPLIT_RSS_COUNTING
747 struct task_rss_stat rss_stat;
748#endif
749 int exit_state;
750 int exit_code;
751 int exit_signal;
752
753 int pdeath_signal;
754
755 unsigned long jobctl;
756
757
758 unsigned int personality;
759
760
761 unsigned sched_reset_on_fork:1;
762 unsigned sched_contributes_to_load:1;
763 unsigned sched_migrated:1;
764 unsigned sched_remote_wakeup:1;
765#ifdef CONFIG_PSI
766 unsigned sched_psi_wake_requeue:1;
767#endif
768
769
770 unsigned :0;
771
772
773
774
775 unsigned in_execve:1;
776 unsigned in_iowait:1;
777#ifndef TIF_RESTORE_SIGMASK
778 unsigned restore_sigmask:1;
779#endif
780#ifdef CONFIG_MEMCG
781 unsigned in_user_fault:1;
782#endif
783#ifdef CONFIG_COMPAT_BRK
784 unsigned brk_randomized:1;
785#endif
786#ifdef CONFIG_CGROUPS
787
788 unsigned no_cgroup_migration:1;
789
790 unsigned frozen:1;
791#endif
792#ifdef CONFIG_BLK_CGROUP
793 unsigned use_memdelay:1;
794#endif
795#ifdef CONFIG_PSI
796
797 unsigned in_memstall:1;
798#endif
799
800 unsigned long atomic_flags;
801
802 struct restart_block restart_block;
803
804 pid_t pid;
805 pid_t tgid;
806
807#ifdef CONFIG_STACKPROTECTOR
808
809 unsigned long stack_canary;
810#endif
811
812
813
814
815
816
817
818 struct task_struct __rcu *real_parent;
819
820
821 struct task_struct __rcu *parent;
822
823
824
825
826 struct list_head children;
827 struct list_head sibling;
828 struct task_struct *group_leader;
829
830
831
832
833
834
835
836 struct list_head ptraced;
837 struct list_head ptrace_entry;
838
839
840 struct pid *thread_pid;
841 struct hlist_node pid_links[PIDTYPE_MAX];
842 struct list_head thread_group;
843 struct list_head thread_node;
844
845 struct completion *vfork_done;
846
847
848 int __user *set_child_tid;
849
850
851 int __user *clear_child_tid;
852
853 u64 utime;
854 u64 stime;
855#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
856 u64 utimescaled;
857 u64 stimescaled;
858#endif
859 u64 gtime;
860 struct prev_cputime prev_cputime;
861#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
862 struct vtime vtime;
863#endif
864
865#ifdef CONFIG_NO_HZ_FULL
866 atomic_t tick_dep_mask;
867#endif
868
869 unsigned long nvcsw;
870 unsigned long nivcsw;
871
872
873 u64 start_time;
874
875
876 u64 start_boottime;
877
878
879 unsigned long min_flt;
880 unsigned long maj_flt;
881
882
883 struct posix_cputimers posix_cputimers;
884
885
886
887
888 const struct cred __rcu *ptracer_cred;
889
890
891 const struct cred __rcu *real_cred;
892
893
894 const struct cred __rcu *cred;
895
896#ifdef CONFIG_KEYS
897
898 struct key *cached_requested_key;
899#endif
900
901
902
903
904
905
906
907
908 char comm[TASK_COMM_LEN];
909
910 struct nameidata *nameidata;
911
912#ifdef CONFIG_SYSVIPC
913 struct sysv_sem sysvsem;
914 struct sysv_shm sysvshm;
915#endif
916#ifdef CONFIG_DETECT_HUNG_TASK
917 unsigned long last_switch_count;
918 unsigned long last_switch_time;
919#endif
920
921 struct fs_struct *fs;
922
923
924 struct files_struct *files;
925
926
927 struct nsproxy *nsproxy;
928
929
930 struct signal_struct *signal;
931 struct sighand_struct __rcu *sighand;
932 sigset_t blocked;
933 sigset_t real_blocked;
934
935 sigset_t saved_sigmask;
936 struct sigpending pending;
937 unsigned long sas_ss_sp;
938 size_t sas_ss_size;
939 unsigned int sas_ss_flags;
940
941 struct callback_head *task_works;
942
943#ifdef CONFIG_AUDIT
944#ifdef CONFIG_AUDITSYSCALL
945 struct audit_context *audit_context;
946#endif
947 kuid_t loginuid;
948 unsigned int sessionid;
949#endif
950 struct seccomp seccomp;
951
952
953 u64 parent_exec_id;
954 u64 self_exec_id;
955
956
957 spinlock_t alloc_lock;
958
959
960 raw_spinlock_t pi_lock;
961
962 struct wake_q_node wake_q;
963
964#ifdef CONFIG_RT_MUTEXES
965
966 struct rb_root_cached pi_waiters;
967
968 struct task_struct *pi_top_task;
969
970 struct rt_mutex_waiter *pi_blocked_on;
971#endif
972
973#ifdef CONFIG_DEBUG_MUTEXES
974
975 struct mutex_waiter *blocked_on;
976#endif
977
978#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
979 int non_block_count;
980#endif
981
982#ifdef CONFIG_TRACE_IRQFLAGS
983 unsigned int irq_events;
984 unsigned int hardirq_threaded;
985 unsigned long hardirq_enable_ip;
986 unsigned long hardirq_disable_ip;
987 unsigned int hardirq_enable_event;
988 unsigned int hardirq_disable_event;
989 int hardirqs_enabled;
990 int hardirq_context;
991 u64 hardirq_chain_key;
992 unsigned long softirq_disable_ip;
993 unsigned long softirq_enable_ip;
994 unsigned int softirq_disable_event;
995 unsigned int softirq_enable_event;
996 int softirqs_enabled;
997 int softirq_context;
998 int irq_config;
999#endif
1000
1001#ifdef CONFIG_LOCKDEP
1002# define MAX_LOCK_DEPTH 48UL
1003 u64 curr_chain_key;
1004 int lockdep_depth;
1005 unsigned int lockdep_recursion;
1006 struct held_lock held_locks[MAX_LOCK_DEPTH];
1007#endif
1008
1009#ifdef CONFIG_UBSAN
1010 unsigned int in_ubsan;
1011#endif
1012
1013
1014 void *journal_info;
1015
1016
1017 struct bio_list *bio_list;
1018
1019#ifdef CONFIG_BLOCK
1020
1021 struct blk_plug *plug;
1022#endif
1023
1024
1025 struct reclaim_state *reclaim_state;
1026
1027 struct backing_dev_info *backing_dev_info;
1028
1029 struct io_context *io_context;
1030
1031#ifdef CONFIG_COMPACTION
1032 struct capture_control *capture_control;
1033#endif
1034
1035 unsigned long ptrace_message;
1036 kernel_siginfo_t *last_siginfo;
1037
1038 struct task_io_accounting ioac;
1039#ifdef CONFIG_PSI
1040
1041 unsigned int psi_flags;
1042#endif
1043#ifdef CONFIG_TASK_XACCT
1044
1045 u64 acct_rss_mem1;
1046
1047 u64 acct_vm_mem1;
1048
1049 u64 acct_timexpd;
1050#endif
1051#ifdef CONFIG_CPUSETS
1052
1053 nodemask_t mems_allowed;
1054
1055 seqcount_t mems_allowed_seq;
1056 int cpuset_mem_spread_rotor;
1057 int cpuset_slab_spread_rotor;
1058#endif
1059#ifdef CONFIG_CGROUPS
1060
1061 struct css_set __rcu *cgroups;
1062
1063 struct list_head cg_list;
1064#endif
1065#ifdef CONFIG_X86_CPU_RESCTRL
1066 u32 closid;
1067 u32 rmid;
1068#endif
1069#ifdef CONFIG_FUTEX
1070 struct robust_list_head __user *robust_list;
1071#ifdef CONFIG_COMPAT
1072 struct compat_robust_list_head __user *compat_robust_list;
1073#endif
1074 struct list_head pi_state_list;
1075 struct futex_pi_state *pi_state_cache;
1076 struct mutex futex_exit_mutex;
1077 unsigned int futex_state;
1078#endif
1079#ifdef CONFIG_PERF_EVENTS
1080 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1081 struct mutex perf_event_mutex;
1082 struct list_head perf_event_list;
1083#endif
1084#ifdef CONFIG_DEBUG_PREEMPT
1085 unsigned long preempt_disable_ip;
1086#endif
1087#ifdef CONFIG_NUMA
1088
1089 struct mempolicy *mempolicy;
1090 short il_prev;
1091 short pref_node_fork;
1092#endif
1093#ifdef CONFIG_NUMA_BALANCING
1094 int numa_scan_seq;
1095 unsigned int numa_scan_period;
1096 unsigned int numa_scan_period_max;
1097 int numa_preferred_nid;
1098 unsigned long numa_migrate_retry;
1099
1100 u64 node_stamp;
1101 u64 last_task_numa_placement;
1102 u64 last_sum_exec_runtime;
1103 struct callback_head numa_work;
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113 struct numa_group __rcu *numa_group;
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129 unsigned long *numa_faults;
1130 unsigned long total_numa_faults;
1131
1132
1133
1134
1135
1136
1137
1138 unsigned long numa_faults_locality[3];
1139
1140 unsigned long numa_pages_migrated;
1141#endif
1142
1143#ifdef CONFIG_RSEQ
1144 struct rseq __user *rseq;
1145 u32 rseq_sig;
1146
1147
1148
1149
1150 unsigned long rseq_event_mask;
1151#endif
1152
1153 struct tlbflush_unmap_batch tlb_ubc;
1154
1155 union {
1156 refcount_t rcu_users;
1157 struct rcu_head rcu;
1158 };
1159
1160
1161 struct pipe_inode_info *splice_pipe;
1162
1163 struct page_frag task_frag;
1164
1165#ifdef CONFIG_TASK_DELAY_ACCT
1166 struct task_delay_info *delays;
1167#endif
1168
1169#ifdef CONFIG_FAULT_INJECTION
1170 int make_it_fail;
1171 unsigned int fail_nth;
1172#endif
1173
1174
1175
1176
1177 int nr_dirtied;
1178 int nr_dirtied_pause;
1179
1180 unsigned long dirty_paused_when;
1181
1182#ifdef CONFIG_LATENCYTOP
1183 int latency_record_count;
1184 struct latency_record latency_record[LT_SAVECOUNT];
1185#endif
1186
1187
1188
1189
1190 u64 timer_slack_ns;
1191 u64 default_timer_slack_ns;
1192
1193#ifdef CONFIG_KASAN
1194 unsigned int kasan_depth;
1195#endif
1196#ifdef CONFIG_KCSAN
1197 struct kcsan_ctx kcsan_ctx;
1198#endif
1199
1200#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1201
1202 int curr_ret_stack;
1203 int curr_ret_depth;
1204
1205
1206 struct ftrace_ret_stack *ret_stack;
1207
1208
1209 unsigned long long ftrace_timestamp;
1210
1211
1212
1213
1214
1215 atomic_t trace_overrun;
1216
1217
1218 atomic_t tracing_graph_pause;
1219#endif
1220
1221#ifdef CONFIG_TRACING
1222
1223 unsigned long trace;
1224
1225
1226 unsigned long trace_recursion;
1227#endif
1228
1229#ifdef CONFIG_KCOV
1230
1231
1232
1233 unsigned int kcov_mode;
1234
1235
1236 unsigned int kcov_size;
1237
1238
1239 void *kcov_area;
1240
1241
1242 struct kcov *kcov;
1243
1244
1245 u64 kcov_handle;
1246
1247
1248 int kcov_sequence;
1249
1250
1251 unsigned int kcov_softirq;
1252#endif
1253
1254#ifdef CONFIG_MEMCG
1255 struct mem_cgroup *memcg_in_oom;
1256 gfp_t memcg_oom_gfp_mask;
1257 int memcg_oom_order;
1258
1259
1260 unsigned int memcg_nr_pages_over_high;
1261
1262
1263 struct mem_cgroup *active_memcg;
1264#endif
1265
1266#ifdef CONFIG_BLK_CGROUP
1267 struct request_queue *throttle_queue;
1268#endif
1269
1270#ifdef CONFIG_UPROBES
1271 struct uprobe_task *utask;
1272#endif
1273#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1274 unsigned int sequential_io;
1275 unsigned int sequential_io_avg;
1276#endif
1277#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1278 unsigned long task_state_change;
1279#endif
1280 int pagefault_disabled;
1281#ifdef CONFIG_MMU
1282 struct task_struct *oom_reaper_list;
1283#endif
1284#ifdef CONFIG_VMAP_STACK
1285 struct vm_struct *stack_vm_area;
1286#endif
1287#ifdef CONFIG_THREAD_INFO_IN_TASK
1288
1289 refcount_t stack_refcount;
1290#endif
1291#ifdef CONFIG_LIVEPATCH
1292 int patch_state;
1293#endif
1294#ifdef CONFIG_SECURITY
1295
1296 void *security;
1297#endif
1298
1299#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1300 unsigned long lowest_stack;
1301 unsigned long prev_lowest_stack;
1302#endif
1303
1304#ifdef CONFIG_X86_MCE
1305 u64 mce_addr;
1306 __u64 mce_ripv : 1,
1307 mce_whole_page : 1,
1308 __mce_reserved : 62;
1309 struct callback_head mce_kill_me;
1310#endif
1311
1312
1313
1314
1315
1316 randomized_struct_fields_end
1317
1318
1319 struct thread_struct thread;
1320
1321
1322
1323
1324
1325
1326
1327};
1328
1329static inline struct pid *task_pid(struct task_struct *task)
1330{
1331 return task->thread_pid;
1332}
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1346
1347static inline pid_t task_pid_nr(struct task_struct *tsk)
1348{
1349 return tsk->pid;
1350}
1351
1352static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1353{
1354 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1355}
1356
1357static inline pid_t task_pid_vnr(struct task_struct *tsk)
1358{
1359 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1360}
1361
1362
1363static inline pid_t task_tgid_nr(struct task_struct *tsk)
1364{
1365 return tsk->tgid;
1366}
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378static inline int pid_alive(const struct task_struct *p)
1379{
1380 return p->thread_pid != NULL;
1381}
1382
1383static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1384{
1385 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1386}
1387
1388static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1389{
1390 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1391}
1392
1393
1394static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1395{
1396 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1397}
1398
1399static inline pid_t task_session_vnr(struct task_struct *tsk)
1400{
1401 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1402}
1403
1404static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1405{
1406 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1407}
1408
1409static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1410{
1411 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1412}
1413
1414static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1415{
1416 pid_t pid = 0;
1417
1418 rcu_read_lock();
1419 if (pid_alive(tsk))
1420 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1421 rcu_read_unlock();
1422
1423 return pid;
1424}
1425
1426static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1427{
1428 return task_ppid_nr_ns(tsk, &init_pid_ns);
1429}
1430
1431
1432static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1433{
1434 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1435}
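
/*
 * Summary of the naming convention used by the pid helpers above, for each
 * id type (pid, tgid, pgrp, session):
 *
 *	task_xid_nr(task)	  global id, as seen from the init namespace
 *	task_xid_vnr(task)	  virtual id, as seen from the pid namespace
 *				  of current
 *	task_xid_nr_ns(task, ns)  id as seen from the given pid namespace
 *
 * Illustrative use from process context:
 *
 *	pr_info("pid %d (global %d)\n",
 *		task_pid_vnr(current), task_pid_nr(current));
 *
 * pr_info() is assumed from <linux/printk.h>.
 */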
1436
1437#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1438#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1439
1440static inline unsigned int task_state_index(struct task_struct *tsk)
1441{
1442 unsigned int tsk_state = READ_ONCE(tsk->state);
1443 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1444
1445 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1446
1447 if (tsk_state == TASK_IDLE)
1448 state = TASK_REPORT_IDLE;
1449
1450 return fls(state);
1451}
1452
1453static inline char task_index_to_char(unsigned int state)
1454{
1455 static const char state_char[] = "RSDTtXZPI";
1456
1457 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1458
1459 return state_char[state];
1460}
1461
1462static inline char task_state_to_char(struct task_struct *tsk)
1463{
1464 return task_index_to_char(task_state_index(tsk));
1465}
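
/*
 * Illustrative mapping: task_state_index() folds state and exit_state down
 * to an index into "RSDTtXZPI", so TASK_RUNNING -> 'R',
 * TASK_INTERRUPTIBLE -> 'S', TASK_UNINTERRUPTIBLE -> 'D',
 * __TASK_STOPPED -> 'T', __TASK_TRACED -> 't', EXIT_DEAD -> 'X',
 * EXIT_ZOMBIE -> 'Z', TASK_PARKED -> 'P' and TASK_IDLE -> 'I'. This is the
 * single-character state reported via /proc/<pid>/stat and ps(1).
 */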
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476static inline int is_global_init(struct task_struct *tsk)
1477{
1478 return task_tgid_nr(tsk) == 1;
1479}
1480
1481extern struct pid *cad_pid;
1482
1483
1484
1485
1486#define PF_IDLE 0x00000002
1487#define PF_EXITING 0x00000004
1488#define PF_VCPU 0x00000010
1489#define PF_WQ_WORKER 0x00000020
1490#define PF_FORKNOEXEC 0x00000040
1491#define PF_MCE_PROCESS 0x00000080
1492#define PF_SUPERPRIV 0x00000100
1493#define PF_DUMPCORE 0x00000200
1494#define PF_SIGNALED 0x00000400
1495#define PF_MEMALLOC 0x00000800
1496#define PF_NPROC_EXCEEDED 0x00001000
1497#define PF_USED_MATH 0x00002000
1498#define PF_USED_ASYNC 0x00004000
1499#define PF_NOFREEZE 0x00008000
1500#define PF_FROZEN 0x00010000
1501#define PF_KSWAPD 0x00020000
1502#define PF_MEMALLOC_NOFS 0x00040000
1503#define PF_MEMALLOC_NOIO 0x00080000
1504#define PF_LOCAL_THROTTLE 0x00100000
1505
1506#define PF_KTHREAD 0x00200000
1507#define PF_RANDOMIZE 0x00400000
1508#define PF_SWAPWRITE 0x00800000
1509#define PF_UMH 0x02000000
1510#define PF_NO_SETAFFINITY 0x04000000
1511#define PF_MCE_EARLY 0x08000000
1512#define PF_MEMALLOC_NOCMA 0x10000000
1513#define PF_IO_WORKER 0x20000000
1514#define PF_FREEZER_SKIP 0x40000000
1515#define PF_SUSPEND_TASK 0x80000000
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1529#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1530#define clear_used_math() clear_stopped_child_used_math(current)
1531#define set_used_math() set_stopped_child_used_math(current)
1532
1533#define conditional_stopped_child_used_math(condition, child) \
1534 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1535
1536#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1537
1538#define copy_to_stopped_child_used_math(child) \
1539 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1540
1541
1542#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1543#define used_math() tsk_used_math(current)
1544
1545static inline bool is_percpu_thread(void)
1546{
1547#ifdef CONFIG_SMP
1548 return (current->flags & PF_NO_SETAFFINITY) &&
1549 (current->nr_cpus_allowed == 1);
1550#else
1551 return true;
1552#endif
1553}
1554
1555
1556#define PFA_NO_NEW_PRIVS 0
1557#define PFA_SPREAD_PAGE 1
1558#define PFA_SPREAD_SLAB 2
1559#define PFA_SPEC_SSB_DISABLE 3
1560#define PFA_SPEC_SSB_FORCE_DISABLE 4
1561#define PFA_SPEC_IB_DISABLE 5
1562#define PFA_SPEC_IB_FORCE_DISABLE 6
1563#define PFA_SPEC_SSB_NOEXEC 7
1564
1565#define TASK_PFA_TEST(name, func) \
1566 static inline bool task_##func(struct task_struct *p) \
1567 { return test_bit(PFA_##name, &p->atomic_flags); }
1568
1569#define TASK_PFA_SET(name, func) \
1570 static inline void task_set_##func(struct task_struct *p) \
1571 { set_bit(PFA_##name, &p->atomic_flags); }
1572
1573#define TASK_PFA_CLEAR(name, func) \
1574 static inline void task_clear_##func(struct task_struct *p) \
1575 { clear_bit(PFA_##name, &p->atomic_flags); }
1576
1577TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1578TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1579
1580TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1581TASK_PFA_SET(SPREAD_PAGE, spread_page)
1582TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1583
1584TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1585TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1586TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1587
1588TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1589TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1590TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1591
1592TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1593TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1594TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1595
1596TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1597TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1598
1599TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1600TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1601TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1602
1603TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1604TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
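
/*
 * Each TASK_PFA_TEST/SET/CLEAR invocation above expands to a small static
 * inline helper operating on task->atomic_flags, e.g. (sketch of the
 * expansion for the first one):
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Some flags intentionally get no CLEAR helper (NO_NEW_PRIVS and the
 * *_FORCE_DISABLE speculation controls), so once set they stay set for the
 * lifetime of the task.
 */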
1605
1606static inline void
1607current_restore_flags(unsigned long orig_flags, unsigned long flags)
1608{
1609 current->flags &= ~flags;
1610 current->flags |= orig_flags & flags;
1611}
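
/*
 * Illustrative use: save, override and later restore a per-task flag around
 * a code section without clobbering bits that were already set:
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	// ... allocations in this section implicitly behave as GFP_NOIO ...
 *	current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 *
 * The memalloc_noio_save()/memalloc_noio_restore() helpers in
 * <linux/sched/mm.h> implement essentially this pattern.
 */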
1612
1613extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1614extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1615#ifdef CONFIG_SMP
1616extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1617extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1618#else
1619static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1620{
1621}
1622static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1623{
1624 if (!cpumask_test_cpu(0, new_mask))
1625 return -EINVAL;
1626 return 0;
1627}
1628#endif
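
/*
 * Illustrative sketch: restricting an already-running task to a single CPU;
 * cpumask_of() is assumed from <linux/cpumask.h>, and "p"/"target_cpu" are
 * hypothetical. Note that on !SMP the stub above only accepts masks that
 * contain CPU 0:
 *
 *	if (set_cpus_allowed_ptr(p, cpumask_of(target_cpu)))
 *		pr_warn("could not pin task to CPU %d\n", target_cpu);
 *
 * For kernel threads that have not started running yet, kthread_bind() is
 * the usual interface instead.
 */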
1629
1630extern int yield_to(struct task_struct *p, bool preempt);
1631extern void set_user_nice(struct task_struct *p, long nice);
1632extern int task_prio(const struct task_struct *p);
1633
1634
1635
1636
1637
1638
1639
1640static inline int task_nice(const struct task_struct *p)
1641{
1642 return PRIO_TO_NICE((p)->static_prio);
1643}
1644
1645extern int can_nice(const struct task_struct *p, const int nice);
1646extern int task_curr(const struct task_struct *p);
1647extern int idle_cpu(int cpu);
1648extern int available_idle_cpu(int cpu);
1649extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1650extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1651extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1652extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1653extern struct task_struct *idle_task(int cpu);
1654
1655
1656
1657
1658
1659
1660
1661static inline bool is_idle_task(const struct task_struct *p)
1662{
1663 return !!(p->flags & PF_IDLE);
1664}
1665
1666extern struct task_struct *curr_task(int cpu);
1667extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1668
1669void yield(void);
1670
1671union thread_union {
1672#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1673 struct task_struct task;
1674#endif
1675#ifndef CONFIG_THREAD_INFO_IN_TASK
1676 struct thread_info thread_info;
1677#endif
1678 unsigned long stack[THREAD_SIZE/sizeof(long)];
1679};
1680
1681#ifndef CONFIG_THREAD_INFO_IN_TASK
1682extern struct thread_info init_thread_info;
1683#endif
1684
1685extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1686
1687#ifdef CONFIG_THREAD_INFO_IN_TASK
1688static inline struct thread_info *task_thread_info(struct task_struct *task)
1689{
1690 return &task->thread_info;
1691}
1692#elif !defined(__HAVE_THREAD_FUNCTIONS)
1693# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1694#endif
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707extern struct task_struct *find_task_by_vpid(pid_t nr);
1708extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1709
1710
1711
1712
1713extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1714
1715extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1716extern int wake_up_process(struct task_struct *tsk);
1717extern void wake_up_new_task(struct task_struct *tsk);
1718
1719#ifdef CONFIG_SMP
1720extern void kick_process(struct task_struct *tsk);
1721#else
1722static inline void kick_process(struct task_struct *tsk) { }
1723#endif
1724
1725extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1726
1727static inline void set_task_comm(struct task_struct *tsk, const char *from)
1728{
1729 __set_task_comm(tsk, from, false);
1730}
1731
1732extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1733#define get_task_comm(buf, tsk) ({ \
1734 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1735 __get_task_comm(buf, sizeof(buf), tsk); \
1736})
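
/*
 * Illustrative use: the destination must be a real array of TASK_COMM_LEN
 * bytes; the BUILD_BUG_ON() above rejects plain pointers, for which
 * sizeof() would be wrong:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *	// "comm" now holds the NUL-terminated command name of current
 */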
1737
1738#ifdef CONFIG_SMP
1739static __always_inline void scheduler_ipi(void)
1740{
1741
1742
1743
1744
1745
1746 preempt_fold_need_resched();
1747}
1748extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1749#else
1750static inline void scheduler_ipi(void) { }
1751static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1752{
1753 return 1;
1754}
1755#endif
1756
1757
1758
1759
1760
1761static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1762{
1763 set_ti_thread_flag(task_thread_info(tsk), flag);
1764}
1765
1766static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1767{
1768 clear_ti_thread_flag(task_thread_info(tsk), flag);
1769}
1770
1771static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1772 bool value)
1773{
1774 update_ti_thread_flag(task_thread_info(tsk), flag, value);
1775}
1776
1777static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1778{
1779 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1780}
1781
1782static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1783{
1784 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1785}
1786
1787static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1788{
1789 return test_ti_thread_flag(task_thread_info(tsk), flag);
1790}
1791
1792static inline void set_tsk_need_resched(struct task_struct *tsk)
1793{
1794 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1795}
1796
1797static inline void clear_tsk_need_resched(struct task_struct *tsk)
1798{
1799 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1800}
1801
1802static inline int test_tsk_need_resched(struct task_struct *tsk)
1803{
1804 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1805}
1806
1807
1808
1809
1810
1811
1812
1813#ifndef CONFIG_PREEMPTION
1814extern int _cond_resched(void);
1815#else
1816static inline int _cond_resched(void) { return 0; }
1817#endif
1818
1819#define cond_resched() ({ \
1820 ___might_sleep(__FILE__, __LINE__, 0); \
1821 _cond_resched(); \
1822})
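
/*
 * Illustrative use: long-running loops in process context should offer
 * explicit reschedule points ("process_item" is a hypothetical per-item
 * helper):
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 *
 * Under CONFIG_PREEMPTION, _cond_resched() above is a no-op and the macro
 * reduces to the ___might_sleep() debugging check, since the kernel can
 * already preempt at any point.
 */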
1823
1824extern int __cond_resched_lock(spinlock_t *lock);
1825
1826#define cond_resched_lock(lock) ({ \
1827 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1828 __cond_resched_lock(lock); \
1829})
1830
1831static inline void cond_resched_rcu(void)
1832{
1833#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1834 rcu_read_unlock();
1835 cond_resched();
1836 rcu_read_lock();
1837#endif
1838}
1839
1840
1841
1842
1843
1844
1845static inline int spin_needbreak(spinlock_t *lock)
1846{
1847#ifdef CONFIG_PREEMPTION
1848 return spin_is_contended(lock);
1849#else
1850 return 0;
1851#endif
1852}
1853
1854static __always_inline bool need_resched(void)
1855{
1856 return unlikely(tif_need_resched());
1857}
1858
1859
1860
1861
1862#ifdef CONFIG_SMP
1863
1864static inline unsigned int task_cpu(const struct task_struct *p)
1865{
1866#ifdef CONFIG_THREAD_INFO_IN_TASK
1867 return READ_ONCE(p->cpu);
1868#else
1869 return READ_ONCE(task_thread_info(p)->cpu);
1870#endif
1871}
1872
1873extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1874
1875#else
1876
1877static inline unsigned int task_cpu(const struct task_struct *p)
1878{
1879 return 0;
1880}
1881
1882static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1883{
1884}
1885
1886#endif
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896#ifndef vcpu_is_preempted
1897static inline bool vcpu_is_preempted(int cpu)
1898{
1899 return false;
1900}
1901#endif
1902
1903extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1904extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1905
1906#ifndef TASK_SIZE_OF
1907#define TASK_SIZE_OF(tsk) TASK_SIZE
1908#endif
1909
1910#ifdef CONFIG_RSEQ
1911
1912
1913
1914
1915
1916enum rseq_event_mask_bits {
1917 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1918 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1919 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1920};
1921
1922enum rseq_event_mask {
1923 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
1924 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
1925 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
1926};
1927
1928static inline void rseq_set_notify_resume(struct task_struct *t)
1929{
1930 if (t->rseq)
1931 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1932}
1933
1934void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1935
1936static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1937 struct pt_regs *regs)
1938{
1939 if (current->rseq)
1940 __rseq_handle_notify_resume(ksig, regs);
1941}
1942
1943static inline void rseq_signal_deliver(struct ksignal *ksig,
1944 struct pt_regs *regs)
1945{
1946 preempt_disable();
 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1948 preempt_enable();
1949 rseq_handle_notify_resume(ksig, regs);
1950}
1951
1952
1953static inline void rseq_preempt(struct task_struct *t)
1954{
1955 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1956 rseq_set_notify_resume(t);
1957}
1958
1959
1960static inline void rseq_migrate(struct task_struct *t)
1961{
1962 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
1963 rseq_set_notify_resume(t);
1964}
1965
1966
1967
1968
1969
1970static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1971{
1972 if (clone_flags & CLONE_VM) {
1973 t->rseq = NULL;
1974 t->rseq_sig = 0;
1975 t->rseq_event_mask = 0;
1976 } else {
1977 t->rseq = current->rseq;
1978 t->rseq_sig = current->rseq_sig;
1979 t->rseq_event_mask = current->rseq_event_mask;
1980 }
1981}
1982
1983static inline void rseq_execve(struct task_struct *t)
1984{
1985 t->rseq = NULL;
1986 t->rseq_sig = 0;
1987 t->rseq_event_mask = 0;
1988}
1989
1990#else
1991
1992static inline void rseq_set_notify_resume(struct task_struct *t)
1993{
1994}
1995static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1996 struct pt_regs *regs)
1997{
1998}
1999static inline void rseq_signal_deliver(struct ksignal *ksig,
2000 struct pt_regs *regs)
2001{
2002}
2003static inline void rseq_preempt(struct task_struct *t)
2004{
2005}
2006static inline void rseq_migrate(struct task_struct *t)
2007{
2008}
2009static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2010{
2011}
2012static inline void rseq_execve(struct task_struct *t)
2013{
2014}
2015
2016#endif
2017
2018void __exit_umh(struct task_struct *tsk);
2019
2020static inline void exit_umh(struct task_struct *tsk)
2021{
2022 if (unlikely(tsk->flags & PF_UMH))
2023 __exit_umh(tsk);
2024}
2025
2026#ifdef CONFIG_DEBUG_RSEQ
2027
2028void rseq_syscall(struct pt_regs *regs);
2029
2030#else
2031
2032static inline void rseq_syscall(struct pt_regs *regs)
2033{
2034}
2035
2036#endif
2037
2038const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2039char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2040int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2041
2042const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2043const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2044const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2045
2046int sched_trace_rq_cpu(struct rq *rq);
2047
2048const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2049
2050#endif
2051