#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * ABIs and APIs, and some other definitions and documentation:
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING 0x0000
#define TASK_INTERRUPTIBLE 0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
#define __TASK_STOPPED 0x0004
#define __TASK_TRACED 0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD 0x0010
#define EXIT_ZOMBIE 0x0020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED 0x0040
#define TASK_DEAD 0x0080
#define TASK_WAKEKILL 0x0100
#define TASK_WAKING 0x0200
#define TASK_NOLOAD 0x0400
#define TASK_NEW 0x0800
#define TASK_STATE_MAX 0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                     TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                     __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
                     TASK_PARKED)

#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
                                        (task->flags & PF_FROZEN) == 0 && \
                                        (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                current->state = (state_value); \
        } while (0)
#define set_current_state(state_value) \
        do { \
                current->task_state_change = _THIS_IP_; \
                smp_store_mb(current->state, (state_value)); \
        } while (0)

#else

/*
 * set_current_state() includes a write barrier (smp_store_mb()) so that
 * the write of current->state is correctly serialised wrt the caller's
 * subsequent test of whether to actually sleep. __set_current_state()
 * omits the barrier and must only be used when ordering is otherwise
 * guaranteed, e.g. under a lock shared with the waker.
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value) smp_store_mb(current->state, (state_value))
#endif
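
/*
 * A minimal usage sketch of the sleep/wakeup protocol (the 'need_sleep'
 * condition and its waker are assumptions for illustration; the waker
 * must clear the condition before calling wake_up_process()):
 *
 *   for (;;) {
 *           set_current_state(TASK_UNINTERRUPTIBLE);
 *           if (!need_sleep)
 *                   break;
 *           schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * The barrier implied by set_current_state() pairs with the barrier in
 * the waker, so the condition test cannot be reordered before the state
 * write; otherwise a wakeup could be missed.
 */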

/* Task command name length: */
#define TASK_COMM_LEN 16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define MAX_SCHEDULE_TIMEOUT LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
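
/*
 * A minimal sketch of a relative-timeout sleep (the 2000 ms value is
 * illustrative):
 *
 *   set_current_state(TASK_INTERRUPTIBLE);
 *   remaining = schedule_timeout(msecs_to_jiffies(2000));
 *
 * schedule_timeout() returns 0 if the full timeout elapsed, or the
 * remaining jiffies if the task was woken early. The _interruptible/
 * _killable/_uninterruptible/_idle variants set the task state
 * themselves before sleeping.
 */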

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock:  protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        u64 utime;
        u64 stime;
        raw_spinlock_t lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:            time spent in user mode, in nanoseconds
 * @stime:            time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked
 * for threads and thread groups.
 */
struct task_cputime {
        u64 utime;
        u64 stime;
        unsigned long long sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime

enum vtime_state {
        /* Task is sleeping or running in a CPU with VTIME inactive: */
        VTIME_INACTIVE = 0,
        /* Task runs in userspace in a CPU with VTIME active: */
        VTIME_USER,
        /* Task runs in kernelspace in a CPU with VTIME active: */
        VTIME_SYS,
};

struct vtime {
        seqcount_t seqcount;
        unsigned long long starttime;
        enum vtime_state state;
        u64 utime;
        u64 stime;
        u64 gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
        /* Cumulative counters: */

        /* # of times we have run on this CPU: */
        unsigned long pcount;

        /* Time spent waiting on a runqueue: */
        unsigned long long run_delay;

        /* Timestamps: */

        /* When did we last run on a CPU? */
        unsigned long long last_arrival;

        /* When were we last queued to run? */
        unsigned long long last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

struct load_weight {
        unsigned long weight;
        /* Precomputed, roughly 2^32 / weight, so divisions become multiplies: */
        u32 inv_weight;
};

/*
 * The load/utilization averages accumulate an infinite, decaying
 * geometric series of past runnable/running time (see
 * __update_load_avg() in kernel/sched/fair.c):
 *
 *   load_avg = runnable% * scale_load_down(load)
 *   util_avg = running%  * SCHED_CAPACITY_SCALE
 *
 * where runnable% (running%) is the fraction of time the entity was
 * runnable (actually running on a CPU).
 */
struct sched_avg {
        u64 last_update_time;
        u64 load_sum;
        u32 util_sum;
        u32 period_contrib;
        unsigned long load_avg;
        unsigned long util_avg;
};
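
/*
 * Worked example of the fixed-point scheme (illustrative numbers): a
 * nice-0 task's weight is 1024 == 1 << SCHED_FIXEDPOINT_SHIFT, and
 * inv_weight caches roughly 2^32 / weight, so "delta / weight" can be
 * evaluated as (delta * inv_weight) >> 32 with no division.
 */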

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
        u64 wait_start;
        u64 wait_max;
        u64 wait_count;
        u64 wait_sum;
        u64 iowait_count;
        u64 iowait_sum;

        u64 sleep_start;
        u64 sleep_max;
        s64 sum_sleep_runtime;

        u64 block_start;
        u64 block_max;
        u64 exec_max;
        u64 slice_max;

        u64 nr_migrations_cold;
        u64 nr_failed_migrations_affine;
        u64 nr_failed_migrations_running;
        u64 nr_failed_migrations_hot;
        u64 nr_forced_migrations;

        u64 nr_wakeups;
        u64 nr_wakeups_sync;
        u64 nr_wakeups_migrate;
        u64 nr_wakeups_local;
        u64 nr_wakeups_remote;
        u64 nr_wakeups_affine;
        u64 nr_wakeups_affine_attempts;
        u64 nr_wakeups_passive;
        u64 nr_wakeups_idle;
#endif
};

struct sched_entity {
        /* For load-balancing: */
        struct load_weight load;
        struct rb_node run_node;
        struct list_head group_node;
        unsigned int on_rq;

        u64 exec_start;
        u64 sum_exec_runtime;
        u64 vruntime;
        u64 prev_sum_exec_runtime;

        u64 nr_migrations;

        struct sched_statistics statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
        int depth;
        struct sched_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
        /*
         * Per entity load average tracking.
         *
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */
        struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned long watchdog_stamp;
        unsigned int time_slice;
        unsigned short on_rq;
        unsigned short on_list;

        struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct rt_rq *rt_rq;
        /* rq "owned" by this entity/group: */
        struct rt_rq *my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
        struct rb_node rb_node;

        /*
         * Original scheduling parameters. Copied here from sched_attr
         * during sched_setattr(), they will remain the same until
         * the next sched_setattr().
         */
        u64 dl_runtime;  /* Maximum runtime for each instance */
        u64 dl_deadline; /* Relative deadline of each instance */
        u64 dl_period;   /* Separation of two instances (period) */
        u64 dl_bw;       /* dl_runtime / dl_period */
        u64 dl_density;  /* dl_runtime / dl_deadline */

        /*
         * Actual scheduling parameters. Initialized with the values above,
         * they are continuously updated during task execution. Note that
         * the remaining runtime could be < 0 in case we are in overrun.
         */
        s64 runtime;  /* Remaining runtime for this instance */
        u64 deadline; /* Absolute deadline for this instance */
        unsigned int flags;

        /*
         * Some bool flags:
         *
         * @dl_throttled tells if we exhausted the runtime. If so, the
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
         * @dl_boosted tells if we are boosted due to DI (deadline
         * inheritance). If so we are outside bandwidth enforcement
         * (but only until we exit the critical section).
         *
         * @dl_yielded tells if task gave up the CPU before consuming
         * all its available runtime during the last job.
         *
         * @dl_non_contending tells if the task is inactive while still
         * contributing to the active utilization.
         */
        int dl_throttled;
        int dl_boosted;
        int dl_yielded;
        int dl_non_contending;

        /*
         * Bandwidth enforcement timer. Each -deadline task has its
         * own bandwidth to be enforced, thus we need one timer per task.
         */
        struct hrtimer dl_timer;

        /*
         * Inactive timer, responsible for decreasing the active utilization
         * at the "0-lag time". When a -deadline task blocks, it contributes
         * to the active utilization until the "0-lag time"; then the
         * inactive timer fires (if the task has not woken up yet) and
         * removes the task's contribution.
         */
        struct hrtimer inactive_timer;
};

union rcu_special {
        struct {
                u8 blocked;
                u8 need_qs;
                u8 exp_need_qs;

                /* Otherwise the compiler can store garbage here: */
                u8 pad;
        } b; /* Bits. */
        u32 s; /* Set of bits. */
};

enum perf_event_task_context {
        perf_invalid_context = -1,
        perf_hw_context = 0,
        perf_sw_context,
        perf_nr_task_contexts,
};

struct wake_q_node {
        struct wake_q_node *next;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */
        struct thread_info thread_info;
#endif

        /* -1 unrunnable, 0 runnable, >0 stopped: */
        volatile long state;

        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
         */
        randomized_struct_fields_start

        void *stack;
        atomic_t usage;
        /* Per task flags (PF_*), defined further below: */
        unsigned int flags;
        unsigned int ptrace;

#ifdef CONFIG_SMP
        struct llist_node wake_entry;
        int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int cpu;
#endif
        unsigned int wakee_flips;
        unsigned long wakee_flip_decay_ts;
        struct task_struct *last_wakee;

        int wake_cpu;
#endif
        int on_rq;

        int prio;
        int static_prio;
        int normal_prio;
        unsigned int rt_priority;

        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
#endif
        struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* List of struct preempt_notifier: */
        struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        int nr_cpus_allowed;
        cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
        struct rcu_node *rcu_blocked_node;
#endif /* CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
        unsigned long rcu_tasks_nvcsw;
        u8 rcu_tasks_holdout;
        u8 rcu_tasks_idx;
        int rcu_tasks_idle_cpu;
        struct list_head rcu_tasks_holdout_list;
#endif /* CONFIG_TASKS_RCU */

        struct sched_info sched_info;

        struct list_head tasks;
#ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
        struct rb_node pushable_dl_tasks;
#endif

        struct mm_struct *mm;
        struct mm_struct *active_mm;

        /* Per-thread vma caching: */
        struct vmacache vmacache;

#ifdef SPLIT_RSS_COUNTING
        struct task_rss_stat rss_stat;
#endif
        int exit_state;
        int exit_code;
        int exit_signal;
        /* The signal sent when the parent dies: */
        int pdeath_signal;
        /* JOBCTL_*, siglock protected: */
        unsigned long jobctl;

        /* Used for emulating ABI behavior of previous Linux versions: */
        unsigned int personality;

        /* Scheduler bits, serialized by scheduler locks: */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
        unsigned sched_remote_wakeup:1;
        /* Force alignment to the next boundary: */
        unsigned :0;

        /* Unserialized, strictly 'current' */

        /* Bit to tell LSMs we're in execve(): */
        unsigned in_execve:1;
        unsigned in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
        unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
        unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
        /* Disallow userland-initiated cgroup migration: */
        unsigned no_cgroup_migration:1;
#endif

        unsigned long atomic_flags; /* Flags requiring atomic access. */

        struct restart_block restart_block;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector GCC feature: */
        unsigned long stack_canary;
#endif

        /*
         * Pointers to the (original) parent process, youngest child,
         * younger sibling, older sibling, respectively.
         * (p->father can be replaced with p->real_parent->pid)
         */

        /* Real parent process: */
        struct task_struct __rcu *real_parent;

        /* Recipient of SIGCHLD, wait4() reports: */
        struct task_struct __rcu *parent;

        /*
         * Children/sibling form the list of natural children:
         */
        struct list_head children;
        struct list_head sibling;
        struct task_struct *group_leader;

        /*
         * 'ptraced' is the list of tasks this task is using ptrace() on.
         *
         * This includes both natural children and PTRACE_ATTACH targets.
         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
        struct list_head thread_node;

        struct completion *vfork_done;

        /* CLONE_CHILD_SETTID: */
        int __user *set_child_tid;

        /* CLONE_CHILD_CLEARTID: */
        int __user *clear_child_tid;

        u64 utime;
        u64 stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64 utimescaled;
        u64 stimescaled;
#endif
        u64 gtime;
        struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        struct vtime vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif
        /* Context switch counts: */
        unsigned long nvcsw;
        unsigned long nivcsw;

        /* Monotonic time in nsecs: */
        u64 start_time;

        /* Boot based time in nsecs: */
        u64 real_start_time;

        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
        unsigned long min_flt;
        unsigned long maj_flt;

#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
#endif

        /* Process credentials: */

        /* Tracer's credentials at attach: */
        const struct cred __rcu *ptracer_cred;

        /* Objective and real subjective task credentials (COW): */
        const struct cred __rcu *real_cred;

        /* Effective (overridable) subjective task credentials (COW): */
        const struct cred __rcu *cred;

        /*
         * executable name, excluding path.
         *
         * - normally initialized setup_new_exec()
         * - access it with [gs]et_task_comm()
         * - lock it with task_lock()
         */
        char comm[TASK_COMM_LEN];

        struct nameidata *nameidata;

#ifdef CONFIG_SYSVIPC
        struct sysv_sem sysvsem;
        struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
        unsigned long last_switch_count;
#endif
        /* Filesystem information: */
        struct fs_struct *fs;

        /* Open file information: */
        struct files_struct *files;

        /* Namespaces: */
        struct nsproxy *nsproxy;

        /* Signal handlers: */
        struct signal_struct *signal;
        struct sighand_struct *sighand;
        sigset_t blocked;
        sigset_t real_blocked;
        /* Restored if set_restore_sigmask() was used: */
        sigset_t saved_sigmask;
        struct sigpending pending;
        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        unsigned int sas_ss_flags;

        struct callback_head *task_works;

        struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
        kuid_t loginuid;
        unsigned int sessionid;
#endif
        struct seccomp seccomp;

        /* Thread group tracking: */
        u32 parent_exec_id;
        u32 self_exec_id;

        /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
        spinlock_t alloc_lock;

        /* Protection of the PI data structures: */
        raw_spinlock_t pi_lock;

        struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task: */
        struct rb_root_cached pi_waiters;
        /* Updated under owner's pi_lock and rq lock: */
        struct task_struct *pi_top_task;
        /* Deadlock detection and priority inheritance handling: */
        struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        /* Mutex deadlock detection: */
        struct mutex_waiter *blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int irq_events;
        unsigned long hardirq_enable_ip;
        unsigned long hardirq_disable_ip;
        unsigned int hardirq_enable_event;
        unsigned int hardirq_disable_event;
        int hardirqs_enabled;
        int hardirq_context;
        unsigned long softirq_disable_ip;
        unsigned long softirq_enable_ip;
        unsigned int softirq_disable_event;
        unsigned int softirq_enable_event;
        int softirqs_enabled;
        int softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
        struct hist_lock *xhlocks;
        unsigned int xhlock_idx;
        /* For restoring at history boundaries: */
        unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
        unsigned int hist_id;
        /* For overwrite detection at each context exit: */
        unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
        unsigned int in_ubsan;
#endif

        /* Journalling filesystem info: */
        void *journal_info;

        /* Stacked block device info: */
        struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
        /* Stack plugging: */
        struct blk_plug *plug;
#endif

        /* VM state: */
        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

        /* Ptrace state: */
        unsigned long ptrace_message;
        siginfo_t *last_siginfo;

        struct task_io_accounting ioac;
#ifdef CONFIG_TASK_XACCT
        /* Accumulated RSS usage: */
        u64 acct_rss_mem1;
        /* Accumulated virtual memory usage: */
        u64 acct_vm_mem1;
        /* stime + utime since last update: */
        u64 acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
        /* Protected by ->alloc_lock: */
        nodemask_t mems_allowed;
        /* Sequence number to catch updates: */
        seqcount_t mems_allowed_seq;
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock: */
        struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
        struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
        u32 closid;
        u32 rmid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        /* Protected by alloc_lock: */
        struct mempolicy *mempolicy;
        short il_prev;
        short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
        int numa_scan_seq;
        unsigned int numa_scan_period;
        unsigned int numa_scan_period_max;
        int numa_preferred_nid;
        unsigned long numa_migrate_retry;
        /* Migration stamp: */
        u64 node_stamp;
        u64 last_task_numa_placement;
        u64 last_sum_exec_runtime;
        struct callback_head numa_work;

        struct list_head numa_entry;
        struct numa_group *numa_group;

        /*
         * numa_faults is an array split into four regions:
         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
         * in this precise order.
         *
         * faults_memory: Exponential decaying average of faults on a per-node
         * basis. Scheduling placement decisions are made based on these
         * counts. The values remain static for the duration of a PTE scan.
         * faults_cpu: Track the nodes the process was running on when a NUMA
         * hinting fault was incurred.
         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
         * during the current scan window. When the scan completes, the counts
         * in faults_memory and faults_cpu decay and these values are copied.
         */
        unsigned long *numa_faults;
        unsigned long total_numa_faults;

        /*
         * numa_faults_locality tracks if faults recorded during the last
         * scan window were remote/local or failed to migrate. The task scan
         * period is adapted based on the locality of the faults with
         * different weights depending on whether they were shared or
         * private faults.
         */
        unsigned long numa_faults_locality[3];

        unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

        struct tlbflush_unmap_batch tlb_ubc;

        struct rcu_head rcu;

        /* Cache last used pipe for splice(): */
        struct pipe_inode_info *splice_pipe;

        struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
        int make_it_fail;
        unsigned int fail_nth;
#endif
        /*
         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
         * balance_dirty_pages() for a dirty throttling pause:
         */
        int nr_dirtied;
        int nr_dirtied_pause;
        /* Start of a write-and-pause period: */
        unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
        int latency_record_count;
        struct latency_record latency_record[LT_SAVECOUNT];
#endif
        /*
         * Time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
        u64 timer_slack_ns;
        u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
        unsigned int kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int curr_ret_stack;

        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack *ret_stack;

        /* Timestamp for last schedule: */
        unsigned long long ftrace_timestamp;

        /*
         * Number of functions that haven't been traced
         * because of depth overrun:
         */
        atomic_t trace_overrun;

        /* Pause tracing: */
        atomic_t tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
        /* State flags for use by tracers: */
        unsigned long trace;

        /* Bitmask and counter of trace recursion: */
        unsigned long trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
        /* Coverage collection mode enabled for this task (0 if disabled): */
        enum kcov_mode kcov_mode;

        /* Size of the kcov_area: */
        unsigned int kcov_size;

        /* Buffer for coverage collection: */
        void *kcov_area;

        /* KCOV descriptor wired with this task or NULL: */
        struct kcov *kcov;
#endif

#ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg_in_oom;
        gfp_t memcg_oom_gfp_mask;
        int memcg_oom_order;

        /* Number of pages to reclaim on returning to userland: */
        unsigned int memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
        struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
        unsigned int sequential_io;
        unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long task_state_change;
#endif
        int pagefault_disabled;
#ifdef CONFIG_MMU
        struct task_struct *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
        struct vm_struct *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* A live task holds one reference: */
        atomic_t stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
        int patch_state;
#endif
#ifdef CONFIG_SECURITY
        /* Used by LSM modules for access restriction: */
        void *security;
#endif

        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
         */
        randomized_struct_fields_end

        /* CPU-specific state of this task: */
        struct thread_struct thread;

        /*
         * WARNING: on x86, 'thread_struct' contains a variable-sized
         * structure. It *MUST* be at the end of 'task_struct'.
         *
         * Do not put anything below here!
         */
};

static inline struct pid *task_pid(struct task_struct *task)
{
        return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PID].pid;
}

/*
 * The process group and session are properties of the thread group,
 * so these return the group leader's pgrp/session pid:
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->group_leader->pids[PIDTYPE_SID].pid;
}

/*
 * The helpers to get the task's different pids as they are seen
 * from various namespaces:
 *
 * task_xid_nr()    : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()   : virtual id, i.e. the id seen from the pid namespace of
 *                    current;
 * task_xid_nr_ns() : id seen from the ns specified.
 *
 * See also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
        return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
        return tsk->tgid;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
        return p->pids[PIDTYPE_PID].pid != NULL;
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}

static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
        pid_t pid = 0;

        rcu_read_lock();
        if (pid_alive(tsk))
                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
        rcu_read_unlock();

        return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
        return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Obsolete, do not use: */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
        return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
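
/*
 * A minimal sketch of the namespace-aware helpers above ('task' and 'ns'
 * are assumed valid; locking is the caller's responsibility):
 *
 *   pid_t global_pid = task_pid_nr(task);         (id in the init namespace)
 *   pid_t virt_pid   = task_pid_vnr(task);        (id in current's pid namespace)
 *   pid_t ns_pid     = task_pid_nr_ns(task, ns);  (id as seen from 'ns')
 */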

#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)

static inline unsigned int __get_task_state(struct task_struct *tsk)
{
        unsigned int tsk_state = READ_ONCE(tsk->state);
        unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;

        BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

        if (tsk_state == TASK_IDLE)
                state = TASK_REPORT_IDLE;

        return fls(state);
}

static inline char __task_state_to_char(unsigned int state)
{
        /* One character per TASK_REPORT bit, in bit order: */
        static const char state_char[] = "RSDTtXZPI";

        BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

        return state_char[state];
}

static inline char task_state_to_char(struct task_struct *tsk)
{
        return __task_state_to_char(__get_task_state(tsk));
}
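
/*
 * The letters returned above are the familiar /proc "State:" codes, one
 * per TASK_REPORT bit plus the synthetic idle state:
 *   R running, S sleeping, D disk (uninterruptible) sleep, T stopped,
 *   t tracing stop, X dead, Z zombie, P parked, I idle.
 */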

/**
 * is_global_init - check if a task structure is init. Since init
 * is free to have arbitrary pid_ns we check its tgid in the init
 * namespace.
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
        return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

/*
 * Per process flags
 */
#define PF_IDLE 0x00000002           /* I am an IDLE thread */
#define PF_EXITING 0x00000004        /* Getting shut down */
#define PF_EXITPIDONE 0x00000008     /* PI exit done on shut down */
#define PF_VCPU 0x00000010           /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020      /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040     /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080    /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100      /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200       /* Dumped core */
#define PF_SIGNALED 0x00000400       /* Killed by a signal */
#define PF_MEMALLOC 0x00000800       /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000      /* If unset the fpu must be initialized before use */
#define PF_USED_ASYNC 0x00004000     /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000       /* This thread should not be frozen */
#define PF_FROZEN 0x00010000         /* Frozen for system suspend */
#define PF_KSWAPD 0x00020000         /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000  /* All allocation requests will inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000  /* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE 0x00100000  /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000        /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000      /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000      /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000      /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000   /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000   /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000   /* This thread called freeze_processes() and should not be frozen */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

#define conditional_stopped_child_used_math(condition, child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

#define copy_to_stopped_child_used_math(child) \
        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
        return (current->flags & PF_NO_SETAFFINITY) &&
                (current->nr_cpus_allowed == 1);
#else
        return true;
#endif
}

/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1  /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2  /* Spread some slab caches over cpuset */

#define TASK_PFA_TEST(name, func) \
        static inline bool task_##func(struct task_struct *p) \
        { return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
        static inline void task_set_##func(struct task_struct *p) \
        { set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
        static inline void task_clear_##func(struct task_struct *p) \
        { clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

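/*
 * A minimal sketch of the accessors generated above:
 *
 *   task_set_no_new_privs(current);
 *   if (task_no_new_privs(p))
 *           return -EPERM;   (illustrative caller policy)
 *
 * Note that there is deliberately no TASK_PFA_CLEAR(NO_NEW_PRIVS, ...):
 * once set, e.g. via seccomp or prctl(PR_SET_NO_NEW_PRIVS), the flag
 * must never be cleared.
 */
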
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
        current->flags &= ~flags;
        current->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
        if (!cpumask_test_cpu(0, new_mask))
                return -EINVAL;
        return 0;
}
#endif
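
/*
 * A minimal sketch of pinning a task to one CPU (assuming 'p' is a
 * task the caller controls and 'cpu' is online):
 *
 *   int ret = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *   if (ret)
 *           ...  (the mask contained no usable CPU)
 *
 * On UP builds this degenerates to checking that CPU 0 is in the mask,
 * as the stub above shows.
 */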

#ifndef cpu_relax_yield
#define cpu_relax_yield() cpu_relax()
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
        return PRIO_TO_NICE((p)->static_prio);
}

extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
        return !!(p->flags & PF_IDLE);
}

extern struct task_struct *curr_task(int cpu);
extern void ia64_set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
#ifndef CONFIG_THREAD_INFO_IN_TASK
        struct thread_info thread_info;
#endif
        unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline struct thread_info *task_thread_info(struct task_struct *task)
{
        return &task->thread_info;
}
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif

/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 *      finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 *      finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
        __set_task_comm(tsk, from, false);
}

extern char *get_task_comm(char *to, struct task_struct *tsk);
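
/*
 * A minimal sketch of reading a task's name safely (the buffer must be
 * at least TASK_COMM_LEN bytes; get_task_comm() copies under task_lock()
 * so the name cannot change mid-copy):
 *
 *   char comm[TASK_COMM_LEN];
 *
 *   get_task_comm(comm, tsk);
 *   pr_info("task: %s\n", comm);
 */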

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
        return 1;
}
#endif

/*
 * Set thread flags in other task's structures.
 * See asm/thread_info.h for TIF_xxxx flags available:
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
        return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
#ifndef CONFIG_PREEMPT
extern int _cond_resched(void);
#else
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() ({ \
        ___might_sleep(__FILE__, __LINE__, 0); \
        _cond_resched(); \
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({ \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
        __cond_resched_lock(lock); \
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({ \
        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
        __cond_resched_softirq(); \
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
#endif
}
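
/*
 * A minimal usage sketch: a long-running loop in process context bounds
 * scheduling latency by offering to reschedule on each iteration
 * ('process_one()' is an illustrative helper):
 *
 *   for (i = 0; i < nr_items; i++) {
 *           process_one(items[i]);
 *           cond_resched();
 *   }
 */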

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
        return spin_is_contended(lock);
#else
        return 0;
#endif
}

static __always_inline bool need_resched(void)
{
        return unlikely(tif_need_resched());
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
        return p->cpu;
#else
        return task_thread_info(p)->cpu;
#endif
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
        return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

/*
 * In order to reduce various lock holder preemption latencies provide an
 * interface to see if a vCPU is currently running or not.
 *
 * This allows us to terminate optimistic spin loops and block, analogous to
 * the native optimistic spin heuristic of testing if the lock owner task is
 * running or not.
 */
#ifndef vcpu_is_preempted
# define vcpu_is_preempted(cpu) false
#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif

#endif /* _LINUX_SCHED_H */