1#ifndef _LINUX_SCHED_H
2#define _LINUX_SCHED_H
3
4#include <uapi/linux/sched.h>
5
6#include <linux/sched/prio.h>
7
8
9struct sched_param {
10 int sched_priority;
11};
12
13#include <asm/param.h>
14
15#include <linux/capability.h>
16#include <linux/threads.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/timex.h>
20#include <linux/jiffies.h>
21#include <linux/plist.h>
22#include <linux/rbtree.h>
23#include <linux/thread_info.h>
24#include <linux/cpumask.h>
25#include <linux/errno.h>
26#include <linux/nodemask.h>
27#include <linux/mm_types.h>
28#include <linux/preempt.h>
29
30#include <asm/page.h>
31#include <asm/ptrace.h>
32#include <linux/cputime.h>
33
34#include <linux/smp.h>
35#include <linux/sem.h>
36#include <linux/shm.h>
37#include <linux/signal.h>
38#include <linux/compiler.h>
39#include <linux/completion.h>
40#include <linux/pid.h>
41#include <linux/percpu.h>
42#include <linux/topology.h>
43#include <linux/proportions.h>
44#include <linux/seccomp.h>
45#include <linux/rcupdate.h>
46#include <linux/rculist.h>
47#include <linux/rtmutex.h>
48
49#include <linux/time.h>
50#include <linux/param.h>
51#include <linux/resource.h>
52#include <linux/timer.h>
53#include <linux/hrtimer.h>
54#include <linux/task_io_accounting.h>
55#include <linux/latencytop.h>
56#include <linux/cred.h>
57#include <linux/llist.h>
58#include <linux/uidgid.h>
59#include <linux/gfp.h>
60#include <linux/magic.h>
61#include <linux/cgroup-defs.h>
62
63#include <asm/processor.h>
64
65#define SCHED_ATTR_SIZE_VER0 48
110
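/*
 * Extended scheduling parameters, as passed to sched_setattr() and
 * returned by sched_getattr().  size must be set to the size of the
 * structure the caller knows about (SCHED_ATTR_SIZE_VER0 for this
 * layout).  sched_nice is used by the normal policies, sched_priority
 * by SCHED_FIFO/SCHED_RR, and sched_runtime/sched_deadline/sched_period
 * (all in nanoseconds, with runtime <= deadline <= period) by
 * SCHED_DEADLINE.
 */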
111struct sched_attr {
112 u32 size;
113
114 u32 sched_policy;
115 u64 sched_flags;
116
117
118 s32 sched_nice;
119
120
121 u32 sched_priority;
122
123
124 u64 sched_runtime;
125 u64 sched_deadline;
126 u64 sched_period;
127};
128
129struct futex_pi_state;
130struct robust_list_head;
131struct bio_list;
132struct fs_struct;
133struct perf_event_context;
134struct blk_plug;
135struct filename;
136struct nameidata;
137
138#define VMACACHE_BITS 2
139#define VMACACHE_SIZE (1U << VMACACHE_BITS)
140#define VMACACHE_MASK (VMACACHE_SIZE - 1)
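/*
 * Small per-task cache of the most recently used VMAs of the current mm,
 * indexed by a hash of the lookup address (see mm/vmacache.c) and
 * invalidated by comparing the task's vmacache_seqnum against the mm's
 * sequence number.
 */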
141
142
143
144
145
146
147
148
149
150
151
152extern unsigned long avenrun[];
153extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
154
155#define FSHIFT 11
156#define FIXED_1 (1<<FSHIFT)
157#define LOAD_FREQ (5*HZ+1)
158#define EXP_1 1884
159#define EXP_5 2014
160#define EXP_15 2037
161
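/*
 * The load average is kept in FSHIFT-bit fixed point.  EXP_1/EXP_5/EXP_15
 * are FIXED_1 * exp(-5sec/window) for the 1, 5 and 15 minute windows, and
 * CALC_LOAD() below applies one step of the exponential moving average
 * every LOAD_FREQ ticks:
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * where n is the number of runnable plus uninterruptible tasks, scaled
 * by FIXED_1.
 */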
162#define CALC_LOAD(load,exp,n) \
163 load *= exp; \
164 load += n*(FIXED_1-exp); \
165 load >>= FSHIFT;
166
167extern unsigned long total_forks;
168extern int nr_threads;
169DECLARE_PER_CPU(unsigned long, process_counts);
170extern int nr_processes(void);
171extern unsigned long nr_running(void);
172extern bool single_task_running(void);
173extern unsigned long nr_iowait(void);
174extern unsigned long nr_iowait_cpu(int cpu);
175extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
176
177extern void calc_global_load(unsigned long ticks);
178
179#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
180extern void update_cpu_load_nohz(void);
181#else
182static inline void update_cpu_load_nohz(void) { }
183#endif
184
185extern unsigned long get_parent_ip(unsigned long addr);
186
187extern void dump_cpu_task(int cpu);
188
189struct seq_file;
190struct cfs_rq;
191struct task_group;
192#ifdef CONFIG_SCHED_DEBUG
193extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
194extern void proc_sched_set_task(struct task_struct *p);
195#endif
196
197
198
199
200
201
202
203
204
205
206
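/*
 * Task state bitmasks.  The TASK_* values live in task_struct::state and
 * should be changed via set_current_state()/set_task_state(); the EXIT_*
 * values live in task_struct::exit_state.  TASK_STATE_TO_CHAR_STR maps
 * each state bit, in order, to the single-character code reported via
 * /proc and by sched_show_task().
 */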
207#define TASK_RUNNING 0
208#define TASK_INTERRUPTIBLE 1
209#define TASK_UNINTERRUPTIBLE 2
210#define __TASK_STOPPED 4
211#define __TASK_TRACED 8
212
213#define EXIT_DEAD 16
214#define EXIT_ZOMBIE 32
215#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
216
217#define TASK_DEAD 64
218#define TASK_WAKEKILL 128
219#define TASK_WAKING 256
220#define TASK_PARKED 512
221#define TASK_NOLOAD 1024
222#define TASK_STATE_MAX 2048
223
224#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
225
226extern char ___assert_task_state[1 - 2*!!(
227 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
228
229
230#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
231#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
232#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
233
234#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
235
236
237#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
238#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
239
240
241#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
242 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
243 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
244
245#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
246#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
247#define task_is_stopped_or_traced(task) \
248 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
249#define task_contributes_to_load(task) \
250 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
251 (task->flags & PF_FROZEN) == 0 && \
252 (task->state & TASK_NOLOAD) == 0)
253
254#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
255
256#define __set_task_state(tsk, state_value) \
257 do { \
258 (tsk)->task_state_change = _THIS_IP_; \
259 (tsk)->state = (state_value); \
260 } while (0)
261#define set_task_state(tsk, state_value) \
262 do { \
263 (tsk)->task_state_change = _THIS_IP_; \
264 smp_store_mb((tsk)->state, (state_value)); \
265 } while (0)
266
267
268
269
270
271
272
273
274
275
276
277
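/*
 * set_current_state() uses smp_store_mb(), so the store of the new state
 * is ordered before a subsequent load of the wakeup condition.  The usual
 * pattern is (illustrative sketch; condition_to_wait_for is a placeholder):
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!condition_to_wait_for)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *
 * Use __set_current_state() when no such ordering against a waker is
 * needed.  With CONFIG_DEBUG_ATOMIC_SLEEP these variants also record the
 * caller in ->task_state_change for the might_sleep() diagnostics.
 */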
278#define __set_current_state(state_value) \
279 do { \
280 current->task_state_change = _THIS_IP_; \
281 current->state = (state_value); \
282 } while (0)
283#define set_current_state(state_value) \
284 do { \
285 current->task_state_change = _THIS_IP_; \
286 smp_store_mb(current->state, (state_value)); \
287 } while (0)
288
289#else
290
291#define __set_task_state(tsk, state_value) \
292 do { (tsk)->state = (state_value); } while (0)
293#define set_task_state(tsk, state_value) \
294 smp_store_mb((tsk)->state, (state_value))
295
296
297
298
299
300
301
302
303
304
305
306
307#define __set_current_state(state_value) \
308 do { current->state = (state_value); } while (0)
309#define set_current_state(state_value) \
310 smp_store_mb(current->state, (state_value))
311
312#endif
313
314
315#define TASK_COMM_LEN 16
316
317#include <linux/spinlock.h>
318
319
320
321
322
323
324
325extern rwlock_t tasklist_lock;
326extern spinlock_t mmlist_lock;
327
328struct task_struct;
329
330#ifdef CONFIG_PROVE_RCU
331extern int lockdep_tasklist_lock_is_held(void);
332#endif
333
334extern void sched_init(void);
335extern void sched_init_smp(void);
336extern asmlinkage void schedule_tail(struct task_struct *prev);
337extern void init_idle(struct task_struct *idle, int cpu);
338extern void init_idle_bootup_task(struct task_struct *idle);
339
340extern cpumask_var_t cpu_isolated_map;
341
342extern int runqueue_is_locked(int cpu);
343
344#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
345extern void nohz_balance_enter_idle(int cpu);
346extern void set_cpu_sd_state_idle(void);
347extern int get_nohz_timer_target(void);
348#else
349static inline void nohz_balance_enter_idle(int cpu) { }
350static inline void set_cpu_sd_state_idle(void) { }
351#endif
352
353
354
355
356extern void show_state_filter(unsigned long state_filter);
357
358static inline void show_state(void)
359{
360 show_state_filter(0);
361}
362
363extern void show_regs(struct pt_regs *);
364
365
366
367
368
369
370extern void show_stack(struct task_struct *task, unsigned long *sp);
371
372extern void cpu_init (void);
373extern void trap_init(void);
374extern void update_process_times(int user);
375extern void scheduler_tick(void);
376
377extern void sched_show_task(struct task_struct *p);
378
379#ifdef CONFIG_LOCKUP_DETECTOR
380extern void touch_softlockup_watchdog(void);
381extern void touch_softlockup_watchdog_sync(void);
382extern void touch_all_softlockup_watchdogs(void);
383extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
384 void __user *buffer,
385 size_t *lenp, loff_t *ppos);
386extern unsigned int softlockup_panic;
387extern unsigned int hardlockup_panic;
388void lockup_detector_init(void);
389#else
390static inline void touch_softlockup_watchdog(void)
391{
392}
393static inline void touch_softlockup_watchdog_sync(void)
394{
395}
396static inline void touch_all_softlockup_watchdogs(void)
397{
398}
399static inline void lockup_detector_init(void)
400{
401}
402#endif
403
404#ifdef CONFIG_DETECT_HUNG_TASK
405void reset_hung_task_detector(void);
406#else
407static inline void reset_hung_task_detector(void)
408{
409}
410#endif
411
412
413#define __sched __attribute__((__section__(".sched.text")))
414
415
416extern char __sched_text_start[], __sched_text_end[];
417
418
419extern int in_sched_functions(unsigned long addr);
420
421#define MAX_SCHEDULE_TIMEOUT LONG_MAX
422extern signed long schedule_timeout(signed long timeout);
423extern signed long schedule_timeout_interruptible(signed long timeout);
424extern signed long schedule_timeout_killable(signed long timeout);
425extern signed long schedule_timeout_uninterruptible(signed long timeout);
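/*
 * schedule_timeout() sleeps for up to the given number of jiffies in
 * whatever state the caller set beforehand and returns the remaining
 * jiffies (0 if the full timeout elapsed).  A rough usage sketch:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(timeout);
 *
 * The _interruptible/_killable/_uninterruptible variants set the task
 * state themselves; MAX_SCHEDULE_TIMEOUT means "sleep indefinitely".
 */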
426asmlinkage void schedule(void);
427extern void schedule_preempt_disabled(void);
428
429extern long io_schedule_timeout(long timeout);
430
431static inline void io_schedule(void)
432{
433 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
434}
435
436struct nsproxy;
437struct user_namespace;
438
439#ifdef CONFIG_MMU
440extern void arch_pick_mmap_layout(struct mm_struct *mm);
441extern unsigned long
442arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
443 unsigned long, unsigned long);
444extern unsigned long
445arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
446 unsigned long len, unsigned long pgoff,
447 unsigned long flags);
448#else
449static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
450#endif
451
452#define SUID_DUMP_DISABLE 0
453#define SUID_DUMP_USER 1
454#define SUID_DUMP_ROOT 2
455
456
457
458
459#define MMF_DUMPABLE_BITS 2
460#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
461
462extern void set_dumpable(struct mm_struct *mm, int value);
463
464
465
466
467
468
469static inline int __get_dumpable(unsigned long mm_flags)
470{
471 return mm_flags & MMF_DUMPABLE_MASK;
472}
473
474static inline int get_dumpable(struct mm_struct *mm)
475{
476 return __get_dumpable(mm->flags);
477}
478
479
480#define MMF_DUMP_ANON_PRIVATE 2
481#define MMF_DUMP_ANON_SHARED 3
482#define MMF_DUMP_MAPPED_PRIVATE 4
483#define MMF_DUMP_MAPPED_SHARED 5
484#define MMF_DUMP_ELF_HEADERS 6
485#define MMF_DUMP_HUGETLB_PRIVATE 7
486#define MMF_DUMP_HUGETLB_SHARED 8
487#define MMF_DUMP_DAX_PRIVATE 9
488#define MMF_DUMP_DAX_SHARED 10
489
490#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
491#define MMF_DUMP_FILTER_BITS 9
492#define MMF_DUMP_FILTER_MASK \
493 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
494#define MMF_DUMP_FILTER_DEFAULT \
495 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
496 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
497
498#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
499# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
500#else
501# define MMF_DUMP_MASK_DEFAULT_ELF 0
502#endif
503
504#define MMF_VM_MERGEABLE 16
505#define MMF_VM_HUGEPAGE 17
506#define MMF_EXE_FILE_CHANGED 18
507
508#define MMF_HAS_UPROBES 19
509#define MMF_RECALC_UPROBES 20
510
511#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
512
513struct sighand_struct {
514 atomic_t count;
515 struct k_sigaction action[_NSIG];
516 spinlock_t siglock;
517 wait_queue_head_t signalfd_wqh;
518};
519
520struct pacct_struct {
521 int ac_flag;
522 long ac_exitcode;
523 unsigned long ac_mem;
524 cputime_t ac_utime, ac_stime;
525 unsigned long ac_minflt, ac_majflt;
526};
527
528struct cpu_itimer {
529 cputime_t expires;
530 cputime_t incr;
531 u32 error;
532 u32 incr_error;
533};
534
535
536
537
538
539
540
541
542
543
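/*
 * Snapshot of the utime/stime values previously reported to userspace,
 * used by cputime_adjust() to keep the reported times monotonically
 * increasing.  Not needed with native vtime accounting, hence the #ifndef.
 */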
544struct prev_cputime {
545#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
546 cputime_t utime;
547 cputime_t stime;
548 raw_spinlock_t lock;
549#endif
550};
551
552static inline void prev_cputime_init(struct prev_cputime *prev)
553{
554#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
555 prev->utime = prev->stime = 0;
556 raw_spin_lock_init(&prev->lock);
557#endif
558}
559
560
561
562
563
564
565
566
567
568
569
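/*
 * struct task_cputime groups user time, system time and total scheduled
 * runtime so that POSIX CPU timers (and RLIMIT_CPU) can be checked
 * against all three clocks at once; the virt_exp/prof_exp/sched_exp
 * aliases below name the fields after the timer type they expire.
 */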
570struct task_cputime {
571 cputime_t utime;
572 cputime_t stime;
573 unsigned long long sum_exec_runtime;
574};
575
576
577#define virt_exp utime
578#define prof_exp stime
579#define sched_exp sum_exec_runtime
580
581#define INIT_CPUTIME \
582 (struct task_cputime) { \
583 .utime = 0, \
584 .stime = 0, \
585 .sum_exec_runtime = 0, \
586 }
587
588
589
590
591
592struct task_cputime_atomic {
593 atomic64_t utime;
594 atomic64_t stime;
595 atomic64_t sum_exec_runtime;
596};
597
598#define INIT_CPUTIME_ATOMIC \
599 (struct task_cputime_atomic) { \
600 .utime = ATOMIC64_INIT(0), \
601 .stime = ATOMIC64_INIT(0), \
602 .sum_exec_runtime = ATOMIC64_INIT(0), \
603 }
604
605#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
606
607
608
609
610
611
612
613#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
614
615
616
617
618
619
620
621
622
623
624#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
625
626
627
628
629
630
631
632
633
634
635
636
637struct thread_group_cputimer {
638 struct task_cputime_atomic cputime_atomic;
639 bool running;
640 bool checking_timer;
641};
642
643#include <linux/rwsem.h>
644struct autogroup;
645
646
647
648
649
650
651
652
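/*
 * Signal handling and resource-accounting state shared by every thread
 * in a thread group.  Most fields are protected by the group's
 * sighand->siglock; the cumulative accounting fields near the end are
 * additionally covered by stats_lock.
 */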
653struct signal_struct {
654 atomic_t sigcnt;
655 atomic_t live;
656 int nr_threads;
657 struct list_head thread_head;
658
659 wait_queue_head_t wait_chldexit;
660
661
662 struct task_struct *curr_target;
663
664
665 struct sigpending shared_pending;
666
667
668 int group_exit_code;
669
670
671
672
673
674 int notify_count;
675 struct task_struct *group_exit_task;
676
677
678 int group_stop_count;
679 unsigned int flags;
680
681
682
683
684
685
686
687
688
689
690 unsigned int is_child_subreaper:1;
691 unsigned int has_child_subreaper:1;
692
693
694 int posix_timer_id;
695 struct list_head posix_timers;
696
697
698 struct hrtimer real_timer;
699 struct pid *leader_pid;
700 ktime_t it_real_incr;
701
702
703
704
705
706
707 struct cpu_itimer it[2];
708
709
710
711
712
713 struct thread_group_cputimer cputimer;
714
715
716 struct task_cputime cputime_expires;
717
718 struct list_head cpu_timers[3];
719
720 struct pid *tty_old_pgrp;
721
722
723 int leader;
724
725 struct tty_struct *tty;
726
727#ifdef CONFIG_SCHED_AUTOGROUP
728 struct autogroup *autogroup;
729#endif
730
731
732
733
734
735
736 seqlock_t stats_lock;
737 cputime_t utime, stime, cutime, cstime;
738 cputime_t gtime;
739 cputime_t cgtime;
740 struct prev_cputime prev_cputime;
741 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
742 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
743 unsigned long inblock, oublock, cinblock, coublock;
744 unsigned long maxrss, cmaxrss;
745 struct task_io_accounting ioac;
746
747
748
749
750
751
752
753 unsigned long long sum_sched_runtime;
754
755
756
757
758
759
760
761
762
763
764 struct rlimit rlim[RLIM_NLIMITS];
765
766#ifdef CONFIG_BSD_PROCESS_ACCT
767 struct pacct_struct pacct;
768#endif
769#ifdef CONFIG_TASKSTATS
770 struct taskstats *stats;
771#endif
772#ifdef CONFIG_AUDIT
773 unsigned audit_tty;
774 unsigned audit_tty_log_passwd;
775 struct tty_audit_buf *tty_audit_buf;
776#endif
777
778 oom_flags_t oom_flags;
779 short oom_score_adj;
780 short oom_score_adj_min;
781
782
783 struct mutex cred_guard_mutex;
784
785
786};
787
788
789
790
791#define SIGNAL_STOP_STOPPED 0x00000001
792#define SIGNAL_STOP_CONTINUED 0x00000002
793#define SIGNAL_GROUP_EXIT 0x00000004
794#define SIGNAL_GROUP_COREDUMP 0x00000008
795
796
797
798#define SIGNAL_CLD_STOPPED 0x00000010
799#define SIGNAL_CLD_CONTINUED 0x00000020
800#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
801
802#define SIGNAL_UNKILLABLE 0x00000040
803
804
805static inline int signal_group_exit(const struct signal_struct *sig)
806{
807 return (sig->flags & SIGNAL_GROUP_EXIT) ||
808 (sig->group_exit_task != NULL);
809}
810
811
812
813
814struct user_struct {
815 atomic_t __count;
816 atomic_t processes;
817 atomic_t sigpending;
818#ifdef CONFIG_INOTIFY_USER
819 atomic_t inotify_watches;
820 atomic_t inotify_devs;
821#endif
822#ifdef CONFIG_FANOTIFY
823 atomic_t fanotify_listeners;
824#endif
825#ifdef CONFIG_EPOLL
826 atomic_long_t epoll_watches;
827#endif
828#ifdef CONFIG_POSIX_MQUEUE
829
830 unsigned long mq_bytes;
831#endif
832 unsigned long locked_shm;
833
834#ifdef CONFIG_KEYS
835 struct key *uid_keyring;
836 struct key *session_keyring;
837#endif
838
839
840 struct hlist_node uidhash_node;
841 kuid_t uid;
842
843#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
844 atomic_long_t locked_vm;
845#endif
846};
847
848extern int uids_sysfs_init(void);
849
850extern struct user_struct *find_user(kuid_t);
851
852extern struct user_struct root_user;
853#define INIT_USER (&root_user)
854
855
856struct backing_dev_info;
857struct reclaim_state;
858
859#ifdef CONFIG_SCHED_INFO
860struct sched_info {
861
862 unsigned long pcount;
863 unsigned long long run_delay;
864
865
866 unsigned long long last_arrival,
867 last_queued;
868};
869#endif
870
871#ifdef CONFIG_TASK_DELAY_ACCT
872struct task_delay_info {
873 spinlock_t lock;
874 unsigned int flags;
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891 u64 blkio_start;
892 u64 blkio_delay;
893 u64 swapin_delay;
894 u32 blkio_count;
895
896 u32 swapin_count;
897
898
899 u64 freepages_start;
900 u64 freepages_delay;
901 u32 freepages_count;
902};
903#endif
904
905static inline int sched_info_on(void)
906{
907#ifdef CONFIG_SCHEDSTATS
908 return 1;
909#elif defined(CONFIG_TASK_DELAY_ACCT)
910 extern int delayacct_on;
911 return delayacct_on;
912#else
913 return 0;
914#endif
915}
916
917enum cpu_idle_type {
918 CPU_IDLE,
919 CPU_NOT_IDLE,
920 CPU_NEWLY_IDLE,
921 CPU_MAX_IDLE_TYPES
922};
923
924
925
926
927#define SCHED_CAPACITY_SHIFT 10
928#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
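/*
 * CPU capacity values are fixed point, with SCHED_CAPACITY_SCALE (1024)
 * representing the capacity of one CPU running at full performance.
 */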
929
955
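/*
 * Wake-queues: collect tasks to be woken up while still holding a lock,
 * then wake them after the lock has been dropped so they cannot
 * immediately block on it again.  Typical usage (illustrative sketch;
 * some_lock is a placeholder):
 *
 *	WAKE_Q(wq);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wq, task);
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wq);
 *
 * wake_q_add() takes a reference on the task and a task can only sit on
 * one wake queue at a time; wake_up_q() drops the references after
 * waking.
 */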
956struct wake_q_node {
957 struct wake_q_node *next;
958};
959
960struct wake_q_head {
961 struct wake_q_node *first;
962 struct wake_q_node **lastp;
963};
964
965#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
966
967#define WAKE_Q(name) \
968 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
969
970extern void wake_q_add(struct wake_q_head *head,
971 struct task_struct *task);
972extern void wake_up_q(struct wake_q_head *head);
973
974
975
976
977#ifdef CONFIG_SMP
978#define SD_LOAD_BALANCE 0x0001
979#define SD_BALANCE_NEWIDLE 0x0002
980#define SD_BALANCE_EXEC 0x0004
981#define SD_BALANCE_FORK 0x0008
982#define SD_BALANCE_WAKE 0x0010
983#define SD_WAKE_AFFINE 0x0020
984#define SD_SHARE_CPUCAPACITY 0x0080
985#define SD_SHARE_POWERDOMAIN 0x0100
986#define SD_SHARE_PKG_RESOURCES 0x0200
987#define SD_SERIALIZE 0x0400
988#define SD_ASYM_PACKING 0x0800
989#define SD_PREFER_SIBLING 0x1000
990#define SD_OVERLAP 0x2000
991#define SD_NUMA 0x4000
992
993#ifdef CONFIG_SCHED_SMT
994static inline int cpu_smt_flags(void)
995{
996 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
997}
998#endif
999
1000#ifdef CONFIG_SCHED_MC
1001static inline int cpu_core_flags(void)
1002{
1003 return SD_SHARE_PKG_RESOURCES;
1004}
1005#endif
1006
1007#ifdef CONFIG_NUMA
1008static inline int cpu_numa_flags(void)
1009{
1010 return SD_NUMA;
1011}
1012#endif
1013
1014struct sched_domain_attr {
1015 int relax_domain_level;
1016};
1017
1018#define SD_ATTR_INIT (struct sched_domain_attr) { \
1019 .relax_domain_level = -1, \
1020}
1021
1022extern int sched_domain_level_max;
1023
1024struct sched_group;
1025
1026struct sched_domain {
1027
1028 struct sched_domain *parent;
1029 struct sched_domain *child;
1030 struct sched_group *groups;
1031 unsigned long min_interval;
1032 unsigned long max_interval;
1033 unsigned int busy_factor;
1034 unsigned int imbalance_pct;
1035 unsigned int cache_nice_tries;
1036 unsigned int busy_idx;
1037 unsigned int idle_idx;
1038 unsigned int newidle_idx;
1039 unsigned int wake_idx;
1040 unsigned int forkexec_idx;
1041 unsigned int smt_gain;
1042
1043 int nohz_idle;
1044 int flags;
1045 int level;
1046
1047
1048 unsigned long last_balance;
1049 unsigned int balance_interval;
1050 unsigned int nr_balance_failed;
1051
1052
1053 u64 max_newidle_lb_cost;
1054 unsigned long next_decay_max_lb_cost;
1055
1056#ifdef CONFIG_SCHEDSTATS
1057
1058 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
1059 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
1060 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
1061 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
1062 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
1063 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
1064 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
1065 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
1066
1067
1068 unsigned int alb_count;
1069 unsigned int alb_failed;
1070 unsigned int alb_pushed;
1071
1072
1073 unsigned int sbe_count;
1074 unsigned int sbe_balanced;
1075 unsigned int sbe_pushed;
1076
1077
1078 unsigned int sbf_count;
1079 unsigned int sbf_balanced;
1080 unsigned int sbf_pushed;
1081
1082
1083 unsigned int ttwu_wake_remote;
1084 unsigned int ttwu_move_affine;
1085 unsigned int ttwu_move_balance;
1086#endif
1087#ifdef CONFIG_SCHED_DEBUG
1088 char *name;
1089#endif
1090 union {
1091 void *private;
1092 struct rcu_head rcu;
1093 };
1094
1095 unsigned int span_weight;
1096
1097
1098
1099
1100
1101
1102
1103 unsigned long span[0];
1104};
1105
1106static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1107{
1108 return to_cpumask(sd->span);
1109}
1110
1111extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1112 struct sched_domain_attr *dattr_new);
1113
1114
1115cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1116void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1117
1118bool cpus_share_cache(int this_cpu, int that_cpu);
1119
1120typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1121typedef int (*sched_domain_flags_f)(void);
1122
1123#define SDTL_OVERLAP 0x01
1124
1125struct sd_data {
1126 struct sched_domain **__percpu sd;
1127 struct sched_group **__percpu sg;
1128 struct sched_group_capacity **__percpu sgc;
1129};
1130
1131struct sched_domain_topology_level {
1132 sched_domain_mask_f mask;
1133 sched_domain_flags_f sd_flags;
1134 int flags;
1135 int numa_level;
1136 struct sd_data data;
1137#ifdef CONFIG_SCHED_DEBUG
1138 char *name;
1139#endif
1140};
1141
1142extern void set_sched_topology(struct sched_domain_topology_level *tl);
1143extern void wake_up_if_idle(int cpu);
1144
1145#ifdef CONFIG_SCHED_DEBUG
1146# define SD_INIT_NAME(type) .name = #type
1147#else
1148# define SD_INIT_NAME(type)
1149#endif
1150
1151#else
1152
1153struct sched_domain_attr;
1154
1155static inline void
1156partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1157 struct sched_domain_attr *dattr_new)
1158{
1159}
1160
1161static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1162{
1163 return true;
1164}
1165
1166#endif
1167
1168
1169struct io_context;
1170
1171
1172#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1173extern void prefetch_stack(struct task_struct *t);
1174#else
1175static inline void prefetch_stack(struct task_struct *t) { }
1176#endif
1177
1178struct audit_context;
1179struct mempolicy;
1180struct pipe_inode_info;
1181struct uts_namespace;
1182
1183struct load_weight {
1184 unsigned long weight;
1185 u32 inv_weight;
1186};
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
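/*
 * Per-entity load tracking (PELT): load_sum and util_sum accumulate the
 * entity's runnable and running time as a geometric series decayed in
 * roughly 1ms periods; load_avg and util_avg are the derived averages
 * consumed by the load balancer.
 */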
1202struct sched_avg {
1203 u64 last_update_time, load_sum;
1204 u32 util_sum, period_contrib;
1205 unsigned long load_avg, util_avg;
1206};
1207
1208#ifdef CONFIG_SCHEDSTATS
1209struct sched_statistics {
1210 u64 wait_start;
1211 u64 wait_max;
1212 u64 wait_count;
1213 u64 wait_sum;
1214 u64 iowait_count;
1215 u64 iowait_sum;
1216
1217 u64 sleep_start;
1218 u64 sleep_max;
1219 s64 sum_sleep_runtime;
1220
1221 u64 block_start;
1222 u64 block_max;
1223 u64 exec_max;
1224 u64 slice_max;
1225
1226 u64 nr_migrations_cold;
1227 u64 nr_failed_migrations_affine;
1228 u64 nr_failed_migrations_running;
1229 u64 nr_failed_migrations_hot;
1230 u64 nr_forced_migrations;
1231
1232 u64 nr_wakeups;
1233 u64 nr_wakeups_sync;
1234 u64 nr_wakeups_migrate;
1235 u64 nr_wakeups_local;
1236 u64 nr_wakeups_remote;
1237 u64 nr_wakeups_affine;
1238 u64 nr_wakeups_affine_attempts;
1239 u64 nr_wakeups_passive;
1240 u64 nr_wakeups_idle;
1241};
1242#endif
1243
1244struct sched_entity {
1245 struct load_weight load;
1246 struct rb_node run_node;
1247 struct list_head group_node;
1248 unsigned int on_rq;
1249
1250 u64 exec_start;
1251 u64 sum_exec_runtime;
1252 u64 vruntime;
1253 u64 prev_sum_exec_runtime;
1254
1255 u64 nr_migrations;
1256
1257#ifdef CONFIG_SCHEDSTATS
1258 struct sched_statistics statistics;
1259#endif
1260
1261#ifdef CONFIG_FAIR_GROUP_SCHED
1262 int depth;
1263 struct sched_entity *parent;
1264
1265 struct cfs_rq *cfs_rq;
1266
1267 struct cfs_rq *my_q;
1268#endif
1269
1270#ifdef CONFIG_SMP
1271
1272 struct sched_avg avg;
1273#endif
1274};
1275
1276struct sched_rt_entity {
1277 struct list_head run_list;
1278 unsigned long timeout;
1279 unsigned long watchdog_stamp;
1280 unsigned int time_slice;
1281
1282 struct sched_rt_entity *back;
1283#ifdef CONFIG_RT_GROUP_SCHED
1284 struct sched_rt_entity *parent;
1285
1286 struct rt_rq *rt_rq;
1287
1288 struct rt_rq *my_q;
1289#endif
1290};
1291
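/*
 * SCHED_DEADLINE entity: dl_runtime/dl_deadline/dl_period are the static
 * parameters copied in from sched_attr (dl_bw caches the runtime/period
 * ratio), while runtime and deadline track the remaining budget and the
 * absolute deadline of the current instance, replenished via dl_timer.
 */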
1292struct sched_dl_entity {
1293 struct rb_node rb_node;
1294
1295
1296
1297
1298
1299
1300 u64 dl_runtime;
1301 u64 dl_deadline;
1302 u64 dl_period;
1303 u64 dl_bw;
1304
1305
1306
1307
1308
1309
1310 s64 runtime;
1311 u64 deadline;
1312 unsigned int flags;
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332 int dl_throttled, dl_new, dl_boosted, dl_yielded;
1333
1334
1335
1336
1337
1338 struct hrtimer dl_timer;
1339};
1340
1341union rcu_special {
1342 struct {
1343 u8 blocked;
1344 u8 need_qs;
1345 u8 exp_need_qs;
1346 u8 pad;
1347 } b;
1348 u32 s;
1349};
1350struct rcu_node;
1351
1352enum perf_event_task_context {
1353 perf_invalid_context = -1,
1354 perf_hw_context = 0,
1355 perf_sw_context,
1356 perf_nr_task_contexts,
1357};
1358
1359
1360struct tlbflush_unmap_batch {
1361
1362
1363
1364
1365 struct cpumask cpumask;
1366
1367
1368 bool flush_required;
1369
1370
1371
1372
1373
1374
1375 bool writable;
1376};
1377
1378struct task_struct {
1379 volatile long state;
1380 void *stack;
1381 atomic_t usage;
1382 unsigned int flags;
1383 unsigned int ptrace;
1384
1385#ifdef CONFIG_SMP
1386 struct llist_node wake_entry;
1387 int on_cpu;
1388 unsigned int wakee_flips;
1389 unsigned long wakee_flip_decay_ts;
1390 struct task_struct *last_wakee;
1391
1392 int wake_cpu;
1393#endif
1394 int on_rq;
1395
1396 int prio, static_prio, normal_prio;
1397 unsigned int rt_priority;
1398 const struct sched_class *sched_class;
1399 struct sched_entity se;
1400 struct sched_rt_entity rt;
1401#ifdef CONFIG_CGROUP_SCHED
1402 struct task_group *sched_task_group;
1403#endif
1404 struct sched_dl_entity dl;
1405
1406#ifdef CONFIG_PREEMPT_NOTIFIERS
1407
1408 struct hlist_head preempt_notifiers;
1409#endif
1410
1411#ifdef CONFIG_BLK_DEV_IO_TRACE
1412 unsigned int btrace_seq;
1413#endif
1414
1415 unsigned int policy;
1416 int nr_cpus_allowed;
1417 cpumask_t cpus_allowed;
1418
1419#ifdef CONFIG_PREEMPT_RCU
1420 int rcu_read_lock_nesting;
1421 union rcu_special rcu_read_unlock_special;
1422 struct list_head rcu_node_entry;
1423 struct rcu_node *rcu_blocked_node;
1424#endif
1425#ifdef CONFIG_TASKS_RCU
1426 unsigned long rcu_tasks_nvcsw;
1427 bool rcu_tasks_holdout;
1428 struct list_head rcu_tasks_holdout_list;
1429 int rcu_tasks_idle_cpu;
1430#endif
1431
1432#ifdef CONFIG_SCHED_INFO
1433 struct sched_info sched_info;
1434#endif
1435
1436 struct list_head tasks;
1437#ifdef CONFIG_SMP
1438 struct plist_node pushable_tasks;
1439 struct rb_node pushable_dl_tasks;
1440#endif
1441
1442 struct mm_struct *mm, *active_mm;
1443
1444 u32 vmacache_seqnum;
1445 struct vm_area_struct *vmacache[VMACACHE_SIZE];
1446#if defined(SPLIT_RSS_COUNTING)
1447 struct task_rss_stat rss_stat;
1448#endif
1449
1450 int exit_state;
1451 int exit_code, exit_signal;
1452 int pdeath_signal;
1453 unsigned long jobctl;
1454
1455
1456 unsigned int personality;
1457
1458
1459 unsigned sched_reset_on_fork:1;
1460 unsigned sched_contributes_to_load:1;
1461 unsigned sched_migrated:1;
1462 unsigned :0;
1463
1464
1465 unsigned in_execve:1;
1466 unsigned in_iowait:1;
1467#ifdef CONFIG_MEMCG
1468 unsigned memcg_may_oom:1;
1469#endif
1470#ifdef CONFIG_MEMCG_KMEM
1471 unsigned memcg_kmem_skip_account:1;
1472#endif
1473#ifdef CONFIG_COMPAT_BRK
1474 unsigned brk_randomized:1;
1475#endif
1476
1477 unsigned long atomic_flags;
1478
1479 struct restart_block restart_block;
1480
1481 pid_t pid;
1482 pid_t tgid;
1483
1484#ifdef CONFIG_CC_STACKPROTECTOR
1485
1486 unsigned long stack_canary;
1487#endif
1488
1489
1490
1491
1492
1493 struct task_struct __rcu *real_parent;
1494 struct task_struct __rcu *parent;
1495
1496
1497
1498 struct list_head children;
1499 struct list_head sibling;
1500 struct task_struct *group_leader;
1501
1502
1503
1504
1505
1506
1507 struct list_head ptraced;
1508 struct list_head ptrace_entry;
1509
1510
1511 struct pid_link pids[PIDTYPE_MAX];
1512 struct list_head thread_group;
1513 struct list_head thread_node;
1514
1515 struct completion *vfork_done;
1516 int __user *set_child_tid;
1517 int __user *clear_child_tid;
1518
1519 cputime_t utime, stime, utimescaled, stimescaled;
1520 cputime_t gtime;
1521 struct prev_cputime prev_cputime;
1522#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1523 seqlock_t vtime_seqlock;
1524 unsigned long long vtime_snap;
1525 enum {
1526 VTIME_SLEEPING = 0,
1527 VTIME_USER,
1528 VTIME_SYS,
1529 } vtime_snap_whence;
1530#endif
1531 unsigned long nvcsw, nivcsw;
1532 u64 start_time;
1533 u64 real_start_time;
1534
1535 unsigned long min_flt, maj_flt;
1536
1537 struct task_cputime cputime_expires;
1538 struct list_head cpu_timers[3];
1539
1540
1541 const struct cred __rcu *real_cred;
1542
1543 const struct cred __rcu *cred;
1544
1545 char comm[TASK_COMM_LEN];
1546
1547
1548
1549
1550 struct nameidata *nameidata;
1551#ifdef CONFIG_SYSVIPC
1552
1553 struct sysv_sem sysvsem;
1554 struct sysv_shm sysvshm;
1555#endif
1556#ifdef CONFIG_DETECT_HUNG_TASK
1557
1558 unsigned long last_switch_count;
1559#endif
1560
1561 struct fs_struct *fs;
1562
1563 struct files_struct *files;
1564
1565 struct nsproxy *nsproxy;
1566
1567 struct signal_struct *signal;
1568 struct sighand_struct *sighand;
1569
1570 sigset_t blocked, real_blocked;
1571 sigset_t saved_sigmask;
1572 struct sigpending pending;
1573
1574 unsigned long sas_ss_sp;
1575 size_t sas_ss_size;
1576
1577 struct callback_head *task_works;
1578
1579 struct audit_context *audit_context;
1580#ifdef CONFIG_AUDITSYSCALL
1581 kuid_t loginuid;
1582 unsigned int sessionid;
1583#endif
1584 struct seccomp seccomp;
1585
1586
1587 u32 parent_exec_id;
1588 u32 self_exec_id;
1589
1590
1591 spinlock_t alloc_lock;
1592
1593
1594 raw_spinlock_t pi_lock;
1595
1596 struct wake_q_node wake_q;
1597
1598#ifdef CONFIG_RT_MUTEXES
1599
1600 struct rb_root pi_waiters;
1601 struct rb_node *pi_waiters_leftmost;
1602
1603 struct rt_mutex_waiter *pi_blocked_on;
1604#endif
1605
1606#ifdef CONFIG_DEBUG_MUTEXES
1607
1608 struct mutex_waiter *blocked_on;
1609#endif
1610#ifdef CONFIG_TRACE_IRQFLAGS
1611 unsigned int irq_events;
1612 unsigned long hardirq_enable_ip;
1613 unsigned long hardirq_disable_ip;
1614 unsigned int hardirq_enable_event;
1615 unsigned int hardirq_disable_event;
1616 int hardirqs_enabled;
1617 int hardirq_context;
1618 unsigned long softirq_disable_ip;
1619 unsigned long softirq_enable_ip;
1620 unsigned int softirq_disable_event;
1621 unsigned int softirq_enable_event;
1622 int softirqs_enabled;
1623 int softirq_context;
1624#endif
1625#ifdef CONFIG_LOCKDEP
1626# define MAX_LOCK_DEPTH 48UL
1627 u64 curr_chain_key;
1628 int lockdep_depth;
1629 unsigned int lockdep_recursion;
1630 struct held_lock held_locks[MAX_LOCK_DEPTH];
1631 gfp_t lockdep_reclaim_gfp;
1632#endif
1633
1634
1635 void *journal_info;
1636
1637
1638 struct bio_list *bio_list;
1639
1640#ifdef CONFIG_BLOCK
1641
1642 struct blk_plug *plug;
1643#endif
1644
1645
1646 struct reclaim_state *reclaim_state;
1647
1648 struct backing_dev_info *backing_dev_info;
1649
1650 struct io_context *io_context;
1651
1652 unsigned long ptrace_message;
1653 siginfo_t *last_siginfo;
1654 struct task_io_accounting ioac;
1655#if defined(CONFIG_TASK_XACCT)
1656 u64 acct_rss_mem1;
1657 u64 acct_vm_mem1;
1658 cputime_t acct_timexpd;
1659#endif
1660#ifdef CONFIG_CPUSETS
1661 nodemask_t mems_allowed;
1662 seqcount_t mems_allowed_seq;
1663 int cpuset_mem_spread_rotor;
1664 int cpuset_slab_spread_rotor;
1665#endif
1666#ifdef CONFIG_CGROUPS
1667
1668 struct css_set __rcu *cgroups;
1669
1670 struct list_head cg_list;
1671#endif
1672#ifdef CONFIG_FUTEX
1673 struct robust_list_head __user *robust_list;
1674#ifdef CONFIG_COMPAT
1675 struct compat_robust_list_head __user *compat_robust_list;
1676#endif
1677 struct list_head pi_state_list;
1678 struct futex_pi_state *pi_state_cache;
1679#endif
1680#ifdef CONFIG_PERF_EVENTS
1681 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1682 struct mutex perf_event_mutex;
1683 struct list_head perf_event_list;
1684#endif
1685#ifdef CONFIG_DEBUG_PREEMPT
1686 unsigned long preempt_disable_ip;
1687#endif
1688#ifdef CONFIG_NUMA
1689 struct mempolicy *mempolicy;
1690 short il_next;
1691 short pref_node_fork;
1692#endif
1693#ifdef CONFIG_NUMA_BALANCING
1694 int numa_scan_seq;
1695 unsigned int numa_scan_period;
1696 unsigned int numa_scan_period_max;
1697 int numa_preferred_nid;
1698 unsigned long numa_migrate_retry;
1699 u64 node_stamp;
1700 u64 last_task_numa_placement;
1701 u64 last_sum_exec_runtime;
1702 struct callback_head numa_work;
1703
1704 struct list_head numa_entry;
1705 struct numa_group *numa_group;
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721 unsigned long *numa_faults;
1722 unsigned long total_numa_faults;
1723
1724
1725
1726
1727
1728
1729
1730 unsigned long numa_faults_locality[3];
1731
1732 unsigned long numa_pages_migrated;
1733#endif
1734
1735#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1736 struct tlbflush_unmap_batch tlb_ubc;
1737#endif
1738
1739 struct rcu_head rcu;
1740
1741
1742
1743
1744 struct pipe_inode_info *splice_pipe;
1745
1746 struct page_frag task_frag;
1747
1748#ifdef CONFIG_TASK_DELAY_ACCT
1749 struct task_delay_info *delays;
1750#endif
1751#ifdef CONFIG_FAULT_INJECTION
1752 int make_it_fail;
1753#endif
1754
1755
1756
1757
1758 int nr_dirtied;
1759 int nr_dirtied_pause;
1760 unsigned long dirty_paused_when;
1761
1762#ifdef CONFIG_LATENCYTOP
1763 int latency_record_count;
1764 struct latency_record latency_record[LT_SAVECOUNT];
1765#endif
1766
1767
1768
1769
1770 unsigned long timer_slack_ns;
1771 unsigned long default_timer_slack_ns;
1772
1773#ifdef CONFIG_KASAN
1774 unsigned int kasan_depth;
1775#endif
1776#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1777
1778 int curr_ret_stack;
1779
1780 struct ftrace_ret_stack *ret_stack;
1781
1782 unsigned long long ftrace_timestamp;
1783
1784
1785
1786
1787 atomic_t trace_overrun;
1788
1789 atomic_t tracing_graph_pause;
1790#endif
1791#ifdef CONFIG_TRACING
1792
1793 unsigned long trace;
1794
1795 unsigned long trace_recursion;
1796#endif
1797#ifdef CONFIG_MEMCG
1798 struct mem_cgroup *memcg_in_oom;
1799 gfp_t memcg_oom_gfp_mask;
1800 int memcg_oom_order;
1801
1802
1803 unsigned int memcg_nr_pages_over_high;
1804#endif
1805#ifdef CONFIG_UPROBES
1806 struct uprobe_task *utask;
1807#endif
1808#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1809 unsigned int sequential_io;
1810 unsigned int sequential_io_avg;
1811#endif
1812#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1813 unsigned long task_state_change;
1814#endif
1815 int pagefault_disabled;
1816
1817 struct thread_struct thread;
1818
1819
1820
1821
1822
1823
1824};
1825
1826#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1827extern int arch_task_struct_size __read_mostly;
1828#else
1829# define arch_task_struct_size (sizeof(struct task_struct))
1830#endif
1831
1832
1833#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1834
1835#define TNF_MIGRATED 0x01
1836#define TNF_NO_GROUP 0x02
1837#define TNF_SHARED 0x04
1838#define TNF_FAULT_LOCAL 0x08
1839#define TNF_MIGRATE_FAIL 0x10
1840
1841#ifdef CONFIG_NUMA_BALANCING
1842extern void task_numa_fault(int last_node, int node, int pages, int flags);
1843extern pid_t task_numa_group_id(struct task_struct *p);
1844extern void set_numabalancing_state(bool enabled);
1845extern void task_numa_free(struct task_struct *p);
1846extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1847 int src_nid, int dst_cpu);
1848#else
1849static inline void task_numa_fault(int last_node, int node, int pages,
1850 int flags)
1851{
1852}
1853static inline pid_t task_numa_group_id(struct task_struct *p)
1854{
1855 return 0;
1856}
1857static inline void set_numabalancing_state(bool enabled)
1858{
1859}
1860static inline void task_numa_free(struct task_struct *p)
1861{
1862}
1863static inline bool should_numa_migrate_memory(struct task_struct *p,
1864 struct page *page, int src_nid, int dst_cpu)
1865{
1866 return true;
1867}
1868#endif
1869
1870static inline struct pid *task_pid(struct task_struct *task)
1871{
1872 return task->pids[PIDTYPE_PID].pid;
1873}
1874
1875static inline struct pid *task_tgid(struct task_struct *task)
1876{
1877 return task->group_leader->pids[PIDTYPE_PID].pid;
1878}
1879
1880
1881
1882
1883
1884
1885static inline struct pid *task_pgrp(struct task_struct *task)
1886{
1887 return task->group_leader->pids[PIDTYPE_PGID].pid;
1888}
1889
1890static inline struct pid *task_session(struct task_struct *task)
1891{
1892 return task->group_leader->pids[PIDTYPE_SID].pid;
1893}
1894
1895struct pid_namespace;
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1911 struct pid_namespace *ns);
1912
1913static inline pid_t task_pid_nr(struct task_struct *tsk)
1914{
1915 return tsk->pid;
1916}
1917
1918static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1919 struct pid_namespace *ns)
1920{
1921 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1922}
1923
1924static inline pid_t task_pid_vnr(struct task_struct *tsk)
1925{
1926 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1927}
1928
1929
1930static inline pid_t task_tgid_nr(struct task_struct *tsk)
1931{
1932 return tsk->tgid;
1933}
1934
1935pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1936
1937static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1938{
1939 return pid_vnr(task_tgid(tsk));
1940}
1941
1942
1943static inline int pid_alive(const struct task_struct *p);
1944static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1945{
1946 pid_t pid = 0;
1947
1948 rcu_read_lock();
1949 if (pid_alive(tsk))
1950 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1951 rcu_read_unlock();
1952
1953 return pid;
1954}
1955
1956static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1957{
1958 return task_ppid_nr_ns(tsk, &init_pid_ns);
1959}
1960
1961static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1962 struct pid_namespace *ns)
1963{
1964 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1965}
1966
1967static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1968{
1969 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1970}
1971
1972
1973static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1974 struct pid_namespace *ns)
1975{
1976 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1977}
1978
1979static inline pid_t task_session_vnr(struct task_struct *tsk)
1980{
1981 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1982}
1983
1984
1985static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1986{
1987 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1988}
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000static inline int pid_alive(const struct task_struct *p)
2001{
2002 return p->pids[PIDTYPE_PID].pid != NULL;
2003}
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014static inline int is_global_init(struct task_struct *tsk)
2015{
2016 return task_tgid_nr(tsk) == 1;
2017}
2018
2019extern struct pid *cad_pid;
2020
2021extern void free_task(struct task_struct *tsk);
2022#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
2023
2024extern void __put_task_struct(struct task_struct *t);
2025
2026static inline void put_task_struct(struct task_struct *t)
2027{
2028 if (atomic_dec_and_test(&t->usage))
2029 __put_task_struct(t);
2030}
2031
2032#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2033extern void task_cputime(struct task_struct *t,
2034 cputime_t *utime, cputime_t *stime);
2035extern void task_cputime_scaled(struct task_struct *t,
2036 cputime_t *utimescaled, cputime_t *stimescaled);
2037extern cputime_t task_gtime(struct task_struct *t);
2038#else
2039static inline void task_cputime(struct task_struct *t,
2040 cputime_t *utime, cputime_t *stime)
2041{
2042 if (utime)
2043 *utime = t->utime;
2044 if (stime)
2045 *stime = t->stime;
2046}
2047
2048static inline void task_cputime_scaled(struct task_struct *t,
2049 cputime_t *utimescaled,
2050 cputime_t *stimescaled)
2051{
2052 if (utimescaled)
2053 *utimescaled = t->utimescaled;
2054 if (stimescaled)
2055 *stimescaled = t->stimescaled;
2056}
2057
2058static inline cputime_t task_gtime(struct task_struct *t)
2059{
2060 return t->gtime;
2061}
2062#endif
2063extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2064extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2065
2066
2067
2068
2069#define PF_EXITING 0x00000004
2070#define PF_EXITPIDONE 0x00000008
2071#define PF_VCPU 0x00000010
2072#define PF_WQ_WORKER 0x00000020
2073#define PF_FORKNOEXEC 0x00000040
2074#define PF_MCE_PROCESS 0x00000080
2075#define PF_SUPERPRIV 0x00000100
2076#define PF_DUMPCORE 0x00000200
2077#define PF_SIGNALED 0x00000400
2078#define PF_MEMALLOC 0x00000800
2079#define PF_NPROC_EXCEEDED 0x00001000
2080#define PF_USED_MATH 0x00002000
2081#define PF_USED_ASYNC 0x00004000
2082#define PF_NOFREEZE 0x00008000
2083#define PF_FROZEN 0x00010000
2084#define PF_FSTRANS 0x00020000
2085#define PF_KSWAPD 0x00040000
2086#define PF_MEMALLOC_NOIO 0x00080000
2087#define PF_LESS_THROTTLE 0x00100000
2088#define PF_KTHREAD 0x00200000
2089#define PF_RANDOMIZE 0x00400000
2090#define PF_SWAPWRITE 0x00800000
2091#define PF_NO_SETAFFINITY 0x04000000
2092#define PF_MCE_EARLY 0x08000000
2093#define PF_MUTEX_TESTER 0x20000000
2094#define PF_FREEZER_SKIP 0x40000000
2095#define PF_SUSPEND_TASK 0x80000000
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
2109#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
2110#define clear_used_math() clear_stopped_child_used_math(current)
2111#define set_used_math() set_stopped_child_used_math(current)
2112#define conditional_stopped_child_used_math(condition, child) \
2113 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
2114#define conditional_used_math(condition) \
2115 conditional_stopped_child_used_math(condition, current)
2116#define copy_to_stopped_child_used_math(child) \
2117 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
2118
2119#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
2120#define used_math() tsk_used_math(current)
2121
2122
2123
2124
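/*
 * PF_MEMALLOC_NOIO makes every allocation by this task behave as if
 * __GFP_IO and __GFP_FS were cleared, for contexts that must not recurse
 * into the I/O or filesystem reclaim paths.  A rough usage sketch:
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	... allocations here cannot trigger I/O during reclaim ...
 *	memalloc_noio_restore(noio_flags);
 */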
2125static inline gfp_t memalloc_noio_flags(gfp_t flags)
2126{
2127 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2128 flags &= ~(__GFP_IO | __GFP_FS);
2129 return flags;
2130}
2131
2132static inline unsigned int memalloc_noio_save(void)
2133{
2134 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2135 current->flags |= PF_MEMALLOC_NOIO;
2136 return flags;
2137}
2138
2139static inline void memalloc_noio_restore(unsigned int flags)
2140{
2141 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2142}
2143
2144
2145#define PFA_NO_NEW_PRIVS 0
2146#define PFA_SPREAD_PAGE 1
2147#define PFA_SPREAD_SLAB 2
2148
2149
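/*
 * Accessors for the atomic_flags bits above.  Each macro instantiation
 * below generates an inline helper, e.g. TASK_PFA_TEST(NO_NEW_PRIVS,
 * no_new_privs) defines task_no_new_privs(p).  Note that NO_NEW_PRIVS
 * deliberately has no CLEAR accessor: once set it must never be cleared.
 */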
2150#define TASK_PFA_TEST(name, func) \
2151 static inline bool task_##func(struct task_struct *p) \
2152 { return test_bit(PFA_##name, &p->atomic_flags); }
2153#define TASK_PFA_SET(name, func) \
2154 static inline void task_set_##func(struct task_struct *p) \
2155 { set_bit(PFA_##name, &p->atomic_flags); }
2156#define TASK_PFA_CLEAR(name, func) \
2157 static inline void task_clear_##func(struct task_struct *p) \
2158 { clear_bit(PFA_##name, &p->atomic_flags); }
2159
2160TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2161TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2162
2163TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2164TASK_PFA_SET(SPREAD_PAGE, spread_page)
2165TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2166
2167TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2168TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2169TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2170
2171
2172
2173
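/*
 * task->jobctl bits: the low 16 bits (JOBCTL_STOP_SIGMASK) hold the
 * signal number of the pending group stop, and the bits above carry the
 * stop/trap bookkeeping shared by job control and ptrace.
 */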
2174#define JOBCTL_STOP_SIGMASK 0xffff
2175
2176#define JOBCTL_STOP_DEQUEUED_BIT 16
2177#define JOBCTL_STOP_PENDING_BIT 17
2178#define JOBCTL_STOP_CONSUME_BIT 18
2179#define JOBCTL_TRAP_STOP_BIT 19
2180#define JOBCTL_TRAP_NOTIFY_BIT 20
2181#define JOBCTL_TRAPPING_BIT 21
2182#define JOBCTL_LISTENING_BIT 22
2183
2184#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
2185#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
2186#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
2187#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
2188#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
2189#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
2190#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
2191
2192#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2193#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2194
2195extern bool task_set_jobctl_pending(struct task_struct *task,
2196 unsigned long mask);
2197extern void task_clear_jobctl_trapping(struct task_struct *task);
2198extern void task_clear_jobctl_pending(struct task_struct *task,
2199 unsigned long mask);
2200
2201static inline void rcu_copy_process(struct task_struct *p)
2202{
2203#ifdef CONFIG_PREEMPT_RCU
2204 p->rcu_read_lock_nesting = 0;
2205 p->rcu_read_unlock_special.s = 0;
2206 p->rcu_blocked_node = NULL;
2207 INIT_LIST_HEAD(&p->rcu_node_entry);
2208#endif
2209#ifdef CONFIG_TASKS_RCU
2210 p->rcu_tasks_holdout = false;
2211 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2212 p->rcu_tasks_idle_cpu = -1;
2213#endif
2214}
2215
2216static inline void tsk_restore_flags(struct task_struct *task,
2217 unsigned long orig_flags, unsigned long flags)
2218{
2219 task->flags &= ~flags;
2220 task->flags |= orig_flags & flags;
2221}
2222
2223extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2224 const struct cpumask *trial);
2225extern int task_can_attach(struct task_struct *p,
2226 const struct cpumask *cs_cpus_allowed);
2227#ifdef CONFIG_SMP
2228extern void do_set_cpus_allowed(struct task_struct *p,
2229 const struct cpumask *new_mask);
2230
2231extern int set_cpus_allowed_ptr(struct task_struct *p,
2232 const struct cpumask *new_mask);
2233#else
2234static inline void do_set_cpus_allowed(struct task_struct *p,
2235 const struct cpumask *new_mask)
2236{
2237}
2238static inline int set_cpus_allowed_ptr(struct task_struct *p,
2239 const struct cpumask *new_mask)
2240{
2241 if (!cpumask_test_cpu(0, new_mask))
2242 return -EINVAL;
2243 return 0;
2244}
2245#endif
2246
2247#ifdef CONFIG_NO_HZ_COMMON
2248void calc_load_enter_idle(void);
2249void calc_load_exit_idle(void);
2250#else
2251static inline void calc_load_enter_idle(void) { }
2252static inline void calc_load_exit_idle(void) { }
2253#endif
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263extern unsigned long long notrace sched_clock(void);
2264
2265
2266
2267extern u64 cpu_clock(int cpu);
2268extern u64 local_clock(void);
2269extern u64 running_clock(void);
2270extern u64 sched_clock_cpu(int cpu);
2271
2272
2273extern void sched_clock_init(void);
2274
2275#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2276static inline void sched_clock_tick(void)
2277{
2278}
2279
2280static inline void sched_clock_idle_sleep_event(void)
2281{
2282}
2283
2284static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2285{
2286}
2287#else
2288
2289
2290
2291
2292
2293
2294extern int sched_clock_stable(void);
2295extern void set_sched_clock_stable(void);
2296extern void clear_sched_clock_stable(void);
2297
2298extern void sched_clock_tick(void);
2299extern void sched_clock_idle_sleep_event(void);
2300extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2301#endif
2302
2303#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2304
2305
2306
2307
2308
2309extern void enable_sched_clock_irqtime(void);
2310extern void disable_sched_clock_irqtime(void);
2311#else
2312static inline void enable_sched_clock_irqtime(void) {}
2313static inline void disable_sched_clock_irqtime(void) {}
2314#endif
2315
2316extern unsigned long long
2317task_sched_runtime(struct task_struct *task);
2318
2319
2320#ifdef CONFIG_SMP
2321extern void sched_exec(void);
2322#else
2323#define sched_exec() {}
2324#endif
2325
2326extern void sched_clock_idle_sleep_event(void);
2327extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2328
2329#ifdef CONFIG_HOTPLUG_CPU
2330extern void idle_task_exit(void);
2331#else
2332static inline void idle_task_exit(void) {}
2333#endif
2334
2335#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2336extern void wake_up_nohz_cpu(int cpu);
2337#else
2338static inline void wake_up_nohz_cpu(int cpu) { }
2339#endif
2340
2341#ifdef CONFIG_NO_HZ_FULL
2342extern bool sched_can_stop_tick(void);
2343extern u64 scheduler_tick_max_deferment(void);
2344#else
2345static inline bool sched_can_stop_tick(void) { return false; }
2346#endif
2347
2348#ifdef CONFIG_SCHED_AUTOGROUP
2349extern void sched_autogroup_create_attach(struct task_struct *p);
2350extern void sched_autogroup_detach(struct task_struct *p);
2351extern void sched_autogroup_fork(struct signal_struct *sig);
2352extern void sched_autogroup_exit(struct signal_struct *sig);
2353#ifdef CONFIG_PROC_FS
2354extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2355extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2356#endif
2357#else
2358static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2359static inline void sched_autogroup_detach(struct task_struct *p) { }
2360static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2361static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2362#endif
2363
2364extern int yield_to(struct task_struct *p, bool preempt);
2365extern void set_user_nice(struct task_struct *p, long nice);
2366extern int task_prio(const struct task_struct *p);
2367
2368
2369
2370
2371
2372
2373static inline int task_nice(const struct task_struct *p)
2374{
2375 return PRIO_TO_NICE((p)->static_prio);
2376}
2377extern int can_nice(const struct task_struct *p, const int nice);
2378extern int task_curr(const struct task_struct *p);
2379extern int idle_cpu(int cpu);
2380extern int sched_setscheduler(struct task_struct *, int,
2381 const struct sched_param *);
2382extern int sched_setscheduler_nocheck(struct task_struct *, int,
2383 const struct sched_param *);
2384extern int sched_setattr(struct task_struct *,
2385 const struct sched_attr *);
2386extern struct task_struct *idle_task(int cpu);
2387
2388
2389
2390
2391
2392
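/*
 * is_idle_task - is @p one of the per-CPU idle ("swapper") threads?
 * Those are the only tasks with PID 0.
 */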
2393static inline bool is_idle_task(const struct task_struct *p)
2394{
2395 return p->pid == 0;
2396}
2397extern struct task_struct *curr_task(int cpu);
2398extern void set_curr_task(int cpu, struct task_struct *p);
2399
2400void yield(void);
2401
2402union thread_union {
2403 struct thread_info thread_info;
2404 unsigned long stack[THREAD_SIZE/sizeof(long)];
2405};
2406
2407#ifndef __HAVE_ARCH_KSTACK_END
2408static inline int kstack_end(void *addr)
2409{
2410
2411
2412
2413 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2414}
2415#endif
2416
2417extern union thread_union init_thread_union;
2418extern struct task_struct init_task;
2419
2420extern struct mm_struct init_mm;
2421
2422extern struct pid_namespace init_pid_ns;
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434
2435extern struct task_struct *find_task_by_vpid(pid_t nr);
2436extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2437 struct pid_namespace *ns);
2438
2439
2440extern struct user_struct * alloc_uid(kuid_t);
2441static inline struct user_struct *get_uid(struct user_struct *u)
2442{
2443 atomic_inc(&u->__count);
2444 return u;
2445}
2446extern void free_uid(struct user_struct *);
2447
2448#include <asm/current.h>
2449
2450extern void xtime_update(unsigned long ticks);
2451
2452extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2453extern int wake_up_process(struct task_struct *tsk);
2454extern void wake_up_new_task(struct task_struct *tsk);
2455#ifdef CONFIG_SMP
2456 extern void kick_process(struct task_struct *tsk);
2457#else
2458 static inline void kick_process(struct task_struct *tsk) { }
2459#endif
2460extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2461extern void sched_dead(struct task_struct *p);
2462
2463extern void proc_caches_init(void);
2464extern void flush_signals(struct task_struct *);
2465extern void ignore_signals(struct task_struct *);
2466extern void flush_signal_handlers(struct task_struct *, int force_default);
2467extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2468
2469static inline int kernel_dequeue_signal(siginfo_t *info)
2470{
2471 struct task_struct *tsk = current;
2472 siginfo_t __info;
2473 int ret;
2474
2475 spin_lock_irq(&tsk->sighand->siglock);
2476 ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
2477 spin_unlock_irq(&tsk->sighand->siglock);
2478
2479 return ret;
2480}
2481
static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
2491
2492extern void release_task(struct task_struct * p);
2493extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2494extern int force_sigsegv(int, struct task_struct *);
2495extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2496extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2497extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2498extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2499 const struct cred *, u32);
2500extern int kill_pgrp(struct pid *pid, int sig, int priv);
2501extern int kill_pid(struct pid *pid, int sig, int priv);
2502extern int kill_proc_info(int, struct siginfo *, pid_t);
2503extern __must_check bool do_notify_parent(struct task_struct *, int);
2504extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2505extern void force_sig(int, struct task_struct *);
2506extern int send_sig(int, struct task_struct *, int);
2507extern int zap_other_threads(struct task_struct *p);
2508extern struct sigqueue *sigqueue_alloc(void);
2509extern void sigqueue_free(struct sigqueue *);
2510extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2511extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2512
static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}
2518
static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
2526
2527static inline int kill_cad_pid(int sig, int priv)
2528{
2529 return kill_pid(cad_pid, sig, priv);
2530}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct * mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the passed mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
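/*
 * Example (editor's sketch, not part of the original header): pinning
 * another task's address space.  get_task_mm() returns NULL if the task
 * has no user address space or is exiting; a non-NULL result must be
 * dropped with mmput():
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... operate on mm; it cannot go away here ...
 *		mmput(mm);
 *	}
 */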

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
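/*
 * Example (editor's sketch, not part of the original header): reading a
 * task's command name into a correctly sized local buffer:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("task %s (pid %d)\n", comm, task_pid_nr(tsk));
 */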

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work, use 'goto err' instead */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
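/*
 * Example (editor's sketch, not part of the original header): walking every
 * thread in the system.  The task lists are RCU protected, so the walk must
 * run under rcu_read_lock() (or with tasklist_lock held):
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("pid %d tgid %d\n", task_pid_nr(t), task_tgid_nr(t));
 *	rcu_read_unlock();
 */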

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread() it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * task_lock() protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[].  And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
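/*
 * Example (editor's sketch, not part of the original header):
 * lock_task_sighand() returns NULL when the task is already being released,
 * so the result must be checked before touching ->signal or ->sighand:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... task->signal and task->sighand are stable here ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */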

/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing the leader - are wrapped
 * by threadgroup_change_{begin|end}().  This is to provide a place which
 * subsystems needing threadgroup stability can hook into for
 * synchronization.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
	cgroup_threadgroup_change_begin(tsk);
}

/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	cgroup_threadgroup_change_end(tsk);
}

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct.  Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over the stack-end canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
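/*
 * Example (editor's sketch, not part of the original header):
 * set_task_stack_end_magic() writes STACK_END_MAGIC to the end of a new
 * task's stack; code that suspects a stack overflow can then test the
 * canary:
 *
 *	if (task_stack_end_corrupted(tsk))
 *		pr_emerg("thread stack of pid %d overflowed\n",
 *			 task_pid_nr(tsk));
 */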

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
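/*
 * Example (editor's sketch, not part of the original header): a sleeping
 * loop in a syscall body that bails out when a signal is pending so the
 * call can be restarted from user space ("done" is a placeholder
 * condition; wait_event_interruptible() wraps the same pattern):
 *
 *	while (!done) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ / 10);
 *	}
 */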

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * reschedule on return and not block.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
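/*
 * Example (editor's sketch, not part of the original header): a long
 * kernel-side loop should offer to reschedule on each iteration so it does
 * not hog the CPU on non-preemptible kernels ("items", "nr_items" and
 * "process_one" are placeholders):
 *
 *	size_t i;
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(&items[i]);
 *		cond_resched();
 *	}
 */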

/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit;
	 * once the bit is cleared we will get IPIs with every new
	 * TIF_NEED_RESCHED, and the IPI handler, scheduler_ipi(), will also
	 * fold it.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access.  No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
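/*
 * Example (editor's sketch, not part of the original header): the in-kernel
 * affinity helpers take a pid (0 selects the current task) and a cpumask;
 * here the caller pins itself to CPU 0 and reads the mask back:
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	sched_setaffinity(0, cpumask_of(0));
 *	sched_getaffinity(0, mask);
 *	free_cpumask_var(mask);
 */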

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
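/*
 * Example (editor's sketch, not part of the original header): the I/O
 * accounting helpers are intended for the read/write paths, e.g. after a
 * read that returned "ret" bytes:
 *
 *	if (ret > 0)
 *		add_rchar(current, ret);
 *	inc_syscr(current);
 */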

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
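/*
 * Example (editor's sketch, not part of the original header): checking the
 * calling task's soft limit before handing out another file descriptor
 * ("nr_open" stands in for whatever count is being enforced):
 *
 *	if (nr_open >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */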

#endif