#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/processor.h>

#define SCHED_ATTR_SIZE_VER0	48

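/*
 * Extended scheduling parameters, used by sched_setattr()/sched_getattr().
 *
 * @size is the size of the structure as user space knows it, so the ABI
 * can grow over time (SCHED_ATTR_SIZE_VER0 above is the size of the first
 * published version). @sched_nice is used by the normal policies
 * (SCHED_NORMAL/SCHED_BATCH), @sched_priority by the real-time policies
 * (SCHED_FIFO/SCHED_RR), and the last three fields describe a
 * SCHED_DEADLINE reservation: the task is granted @sched_runtime of CPU
 * time every @sched_period, available within @sched_deadline of each
 * period's start.
 */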
struct sched_attr {
	u32 size;

	u32 sched_policy;
	u64 sched_flags;

	s32 sched_nice;

	u32 sched_priority;

	u64 sched_runtime;
	u64 sched_deadline;
	u64 sched_period;
};

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)

extern unsigned long avenrun[];
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

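/*
 * Fixed-point constants for the load-average calculation: FSHIFT bits of
 * precision, so FIXED_1 represents 1.0. The EXP_* values are 1/exp(5s/1min),
 * 1/exp(5s/5min) and 1/exp(5s/15min) in this fixed-point format; a sample
 * is folded in every LOAD_FREQ ticks (5 seconds).
 */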
#define FSHIFT		11
#define FIXED_1		(1<<FSHIFT)
#define LOAD_FREQ	(5*HZ+1)
#define EXP_1		1884
#define EXP_5		2014
#define EXP_15		2037

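/*
 * CALC_LOAD folds the current count of active tasks @n into the running
 * average @load as load = load * exp + n * (FIXED_1 - exp), then shifts
 * the fixed-point product back down by FSHIFT. Note that the macro
 * deliberately expands to three statements, so it must not be used where
 * a single statement is expected (e.g. an unbraced if/else).
 */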
#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);

extern void calc_global_load(unsigned long ticks);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void update_cpu_load_nohz(int active);
#else
static inline void update_cpu_load_nohz(int active) { }
#endif

extern void dump_cpu_task(int cpu);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif

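/*
 * Task state bitmask. The low values are the live states a task can be in
 * (task->state); the EXIT_* values go in task->exit_state instead.
 * TASK_STATE_TO_CHAR_STR maps each state bit to the single letter shown by
 * ps/top, and the build-time assertion below keeps its length in sync with
 * TASK_STATE_MAX.
 */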
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8

#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)

#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512
#define TASK_NOLOAD		1024
#define TASK_STATE_MAX		2048

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE		(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FROZEN) == 0 && \
				 (task->state & TASK_NOLOAD) == 0)

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		(tsk)->state = (state_value);			\
	} while (0)
#define set_task_state(tsk, state_value)			\
	do {							\
		(tsk)->task_state_change = _THIS_IP_;		\
		smp_store_mb((tsk)->state, (state_value));	\
	} while (0)

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	smp_store_mb((tsk)->state, (state_value))

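/*
 * set_current_state() includes a full memory barrier (via smp_store_mb())
 * so that the store to ->state is ordered against the caller's subsequent
 * test of whether it still needs to sleep; this pairs with the barrier in
 * the waker's wake_up() path. The canonical sleep pattern is:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_to_wait())
 *		break;
 *	schedule();
 *
 * Callers that do not need this ordering can use __set_current_state().
 */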
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	smp_store_mb(current->state, (state_value))

#endif

#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif

extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

extern void show_stack(struct task_struct *task, unsigned long *sp);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif

#define __sched		__attribute__((__section__(".sched.text")))

extern char __sched_text_start[], __sched_text_end[];

extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern long io_schedule_timeout(long timeout);

static inline void io_schedule(void)
{
	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

struct nsproxy;
struct user_namespace;

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

#define SUID_DUMP_DISABLE	0
#define SUID_DUMP_USER		1
#define SUID_DUMP_ROOT		2

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);

static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED	8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif

#define MMF_VM_MERGEABLE	16
#define MMF_VM_HUGEPAGE		17
#define MMF_EXE_FILE_CHANGED	18

#define MMF_HAS_UPROBES		19
#define MMF_RECALC_UPROBES	20

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

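/*
 * struct prev_cputime - utime/stime values handed out previously
 *
 * Caches the last utime/stime returned by the cputime-adjustment code so
 * that the reported values stay monotonic even when the raw samples are
 * rescaled. Empty on architectures with native vtime accounting, where
 * the samples are already exact.
 */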
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	cputime_t utime;
	cputime_t stime;
	raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	prev->utime = prev->stime = 0;
	raw_spin_lock_init(&prev->lock);
#endif
}

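/*
 * struct task_cputime - collected CPU time counts: user time, system time
 * and total scheduled run time. The virt_exp/prof_exp/sched_exp aliases
 * below name the same fields after the POSIX CPU-timer expiration caches
 * that reuse this structure.
 */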
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};

#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = 0,					\
		.stime = 0,					\
		.sum_exec_runtime = 0,				\
	}

struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

#define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

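/*
 * Disable preemption until the scheduler is running: freshly created tasks
 * start with this count, and it is reset once init_idle() has set up the
 * CPU's idle task during sched_init().
 */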
#define INIT_PREEMPT_COUNT	PREEMPT_OFFSET

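/*
 * The preempt_count value a forked child runs with before its first return
 * to user space. It reflects the schedule() invariant that, during a
 * context switch, preempt_count() == 2*PREEMPT_DISABLE_OFFSET: a new task
 * begins its life in the middle of such a switch.
 */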
#define FORK_PREEMPT_COUNT	(2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

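/*
 * struct thread_group_cputimer - thread group CPU timer bookkeeping
 *
 * Keeps the thread group's CPU time totals in atomic counters so the
 * process-wide POSIX CPU timers can be checked from the timer tick without
 * taking the siglock. @running says whether any such timer is armed;
 * @checking_timer says a CPU is currently walking the timer list.
 */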
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

#include <linux/rwsem.h>
struct autogroup;

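/*
 * NOTE! "signal_struct" does not have its own locking: a shared
 * signal_struct always implies a shared sighand_struct, so locking the
 * sighand's siglock is always a proper superset of locking signal_struct.
 * It holds the per-thread-group state: shared pending signals, exit
 * bookkeeping, interval timers, resource limits and the accumulated
 * accounting of dead threads.
 */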
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;

	struct task_struct	*curr_target;

	struct sigpending	shared_pending;

	int			group_exit_code;

	int			notify_count;
	struct task_struct	*group_exit_task;

	int			group_stop_count;
	unsigned int		flags;

	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

	int			posix_timer_id;
	struct list_head	posix_timers;

	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	struct cpu_itimer it[2];

	struct thread_group_cputimer cputimer;

	struct task_cputime cputime_expires;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	int leader;

	struct tty_struct *tty;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	seqlock_t stats_lock;
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	unsigned long long sum_sched_runtime;

	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	oom_flags_t oom_flags;
	short oom_score_adj;
	short oom_score_adj_min;

	struct mutex cred_guard_mutex;
};

#define SIGNAL_STOP_STOPPED	0x00000001
#define SIGNAL_STOP_CONTINUED	0x00000002
#define SIGNAL_GROUP_EXIT	0x00000004
#define SIGNAL_GROUP_COREDUMP	0x00000008

#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040

static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

struct user_struct {
	atomic_t __count;
	atomic_t processes;
	atomic_t sigpending;
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches;
	atomic_t inotify_devs;
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches;
#endif
#ifdef CONFIG_POSIX_MQUEUE
	unsigned long mq_bytes;
#endif
	unsigned long locked_shm;
	unsigned long unix_inflight;
	atomic_long_t pipe_bufs;

#ifdef CONFIG_KEYS
	struct key *uid_keyring;
	struct key *session_keyring;
#endif

	struct hlist_node uidhash_node;
	kuid_t uid;

#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(kuid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
	unsigned long pcount;
	unsigned long long run_delay;

	unsigned long long last_arrival,
			   last_queued;
};
#endif

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;

	u64 blkio_start;
	u64 blkio_delay;
	u64 swapin_delay;
	u32 blkio_count;

	u32 swapin_count;

	u64 freepages_start;
	u64 freepages_delay;
	u32 freepages_count;
};
#endif

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1L << SCHED_CAPACITY_SHIFT)

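/*
 * Wake-queues: queue up tasks to be woken after a lock is released, so the
 * wakeups happen without the lock held. A task can be on at most one wake
 * queue at a time; wake_q_add() takes a task reference and quietly does
 * nothing if the task is already queued. The head is not concurrency safe
 * and is meant to live on the stack of a single waker:
 *
 *	WAKE_Q(wake_q);
 *	...
 *	wake_q_add(&wake_q, task);	// while holding the lock
 *	...
 *	wake_up_q(&wake_q);		// after dropping the lock
 */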
struct wake_q_node {
	struct wake_q_node *next;
};

struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

#define WAKE_Q(name)					\
	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }

extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001
#define SD_BALANCE_NEWIDLE	0x0002
#define SD_BALANCE_EXEC		0x0004
#define SD_BALANCE_FORK		0x0008
#define SD_BALANCE_WAKE		0x0010
#define SD_WAKE_AFFINE		0x0020
#define SD_SHARE_CPUCAPACITY	0x0080
#define SD_SHARE_POWERDOMAIN	0x0100
#define SD_SHARE_PKG_RESOURCES	0x0200
#define SD_SERIALIZE		0x0400
#define SD_ASYM_PACKING		0x0800
#define SD_PREFER_SIBLING	0x1000
#define SD_OVERLAP		0x2000
#define SD_NUMA			0x4000

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain {
	struct sched_domain *parent;
	struct sched_domain *child;
	struct sched_group *groups;
	unsigned long min_interval;
	unsigned long max_interval;
	unsigned int busy_factor;
	unsigned int imbalance_pct;
	unsigned int cache_nice_tries;
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;

	int nohz_idle;
	int flags;
	int level;

	unsigned long last_balance;
	unsigned int balance_interval;
	unsigned int nr_balance_failed;

	u64 max_newidle_lb_cost;
	unsigned long next_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;
		struct rcu_head rcu;
	};

	unsigned int span_weight;

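	/*
	 * Span of all CPUs in this domain. This field is variable length:
	 * the allocation is sized to hold nr_cpu_ids bits past the end of
	 * the structure, so it must remain the last member.
	 */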
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain **__percpu sd;
	struct sched_group **__percpu sg;
	struct sched_group_capacity **__percpu sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char		    *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif

struct io_context;

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
	unsigned long weight;
	u32 inv_weight;
};

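/*
 * Per-entity load tracking (PELT) state. The scheduler keeps geometrically
 * decayed sums of the time an entity was runnable (load_sum) and actually
 * running (util_sum), accumulated in roughly 1ms periods; load_avg and
 * util_avg are those sums normalised into directly comparable averages.
 * period_contrib is the part of the current period already accounted.
 */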
struct sched_avg {
	u64 last_update_time, load_sum;
	u32 util_sum, period_contrib;
	unsigned long load_avg, util_avg;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	int			depth;
	struct sched_entity	*parent;
	struct cfs_rq		*cfs_rq;
	struct cfs_rq		*my_q;
#endif

#ifdef CONFIG_SMP
	struct sched_avg	avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned long watchdog_stamp;
	unsigned int time_slice;
	unsigned short on_rq;
	unsigned short on_list;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	struct rt_rq		*rt_rq;
	struct rt_rq		*my_q;
#endif
};

struct sched_dl_entity {
	struct rb_node	rb_node;

	u64 dl_runtime;
	u64 dl_deadline;
	u64 dl_period;
	u64 dl_bw;

	s64 runtime;
	u64 deadline;
	unsigned int flags;

	int dl_throttled, dl_boosted, dl_yielded;

	struct hrtimer dl_timer;
};

union rcu_special {
	struct {
		u8 blocked;
		u8 need_qs;
		u8 exp_need_qs;
		u8 pad;
	} b;
	u32 s;
};
struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct tlbflush_unmap_batch {
	struct cpumask cpumask;

	bool flush_required;

	bool writable;
};

struct task_struct {
	volatile long state;
	void *stack;
	atomic_t usage;
	unsigned int flags;
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct llist_node wake_entry;
	int on_cpu;
	unsigned int wakee_flips;
	unsigned long wakee_flip_decay_ts;
	struct task_struct *last_wakee;

	int wake_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
#endif
	struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	int nr_cpus_allowed;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	union rcu_special rcu_read_unlock_special;
	struct list_head rcu_node_entry;
	struct rcu_node *rcu_blocked_node;
#endif
#ifdef CONFIG_TASKS_RCU
	unsigned long rcu_tasks_nvcsw;
	bool rcu_tasks_holdout;
	struct list_head rcu_tasks_holdout_list;
	int rcu_tasks_idle_cpu;
#endif

#ifdef CONFIG_SCHED_INFO
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
	struct rb_node pushable_dl_tasks;
#endif

	struct mm_struct *mm, *active_mm;

	u32 vmacache_seqnum;
	struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif

	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;
	unsigned long jobctl;

	unsigned int personality;

	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned :0;

	unsigned in_execve:1;
	unsigned in_iowait:1;
#ifdef CONFIG_MEMCG
	unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif

	unsigned long atomic_flags;

	struct restart_block restart_block;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;
#endif

	struct task_struct __rcu *real_parent;
	struct task_struct __rcu *parent;

	struct list_head children;
	struct list_head sibling;
	struct task_struct *group_leader;

	struct list_head ptraced;
	struct list_head ptrace_entry;

	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;
	struct list_head thread_node;

	struct completion *vfork_done;
	int __user *set_child_tid;
	int __user *clear_child_tid;

	cputime_t utime, stime, utimescaled, stimescaled;
	cputime_t gtime;
	struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqcount_t vtime_seqcount;
	unsigned long long vtime_snap;
	enum {
		VTIME_INACTIVE = 0,
		VTIME_USER,
		VTIME_SYS,
	} vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif
	unsigned long nvcsw, nivcsw;
	u64 start_time;
	u64 real_start_time;

	unsigned long min_flt, maj_flt;

	struct task_cputime cputime_expires;
	struct list_head cpu_timers[3];

	const struct cred __rcu *real_cred;
	const struct cred __rcu *cred;

	char comm[TASK_COMM_LEN];

	struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
	struct sysv_sem sysvsem;
	struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long last_switch_count;
#endif

	struct fs_struct *fs;

	struct files_struct *files;

	struct nsproxy *nsproxy;

	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;

	struct callback_head *task_works;

	struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t loginuid;
	unsigned int sessionid;
#endif
	struct seccomp seccomp;

	u32 parent_exec_id;
	u32 self_exec_id;

	spinlock_t alloc_lock;

	raw_spinlock_t pi_lock;

	struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
	struct rb_root pi_waiters;
	struct rb_node *pi_waiters_leftmost;
	struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int irq_events;
	unsigned long hardirq_enable_ip;
	unsigned long hardirq_disable_ip;
	unsigned int hardirq_enable_event;
	unsigned int hardirq_disable_event;
	int hardirqs_enabled;
	int hardirq_context;
	unsigned long softirq_disable_ip;
	unsigned long softirq_enable_ip;
	unsigned int softirq_disable_event;
	unsigned int softirq_enable_event;
	int softirqs_enabled;
	int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
	u64 curr_chain_key;
	int lockdep_depth;
	unsigned int lockdep_recursion;
	struct held_lock held_locks[MAX_LOCK_DEPTH];
	gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
	unsigned int in_ubsan;
#endif

	void *journal_info;

	struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
	struct blk_plug *plug;
#endif

	struct reclaim_state *reclaim_state;

	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;
	struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
	u64 acct_rss_mem1;
	u64 acct_vm_mem1;
	cputime_t acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	nodemask_t mems_allowed;
	seqcount_t mems_allowed_seq;
	int cpuset_mem_spread_rotor;
	int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	struct css_set __rcu *cgroups;
	struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif
	struct list_head pi_state_list;
	struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
	struct mutex perf_event_mutex;
	struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
	unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
	short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
	int numa_scan_seq;
	unsigned int numa_scan_period;
	unsigned int numa_scan_period_max;
	int numa_preferred_nid;
	unsigned long numa_migrate_retry;
	u64 node_stamp;
	u64 last_task_numa_placement;
	u64 last_sum_exec_runtime;
	struct callback_head numa_work;

	struct list_head numa_entry;
	struct numa_group *numa_group;

	unsigned long *numa_faults;
	unsigned long total_numa_faults;

	unsigned long numa_faults_locality[3];

	unsigned long numa_pages_migrated;
#endif

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	struct tlbflush_unmap_batch tlb_ubc;
#endif

	struct rcu_head rcu;

	struct pipe_inode_info *splice_pipe;

	struct page_frag task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
	struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
	int make_it_fail;
#endif

	int nr_dirtied;
	int nr_dirtied_pause;
	unsigned long dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
	int latency_record_count;
	struct latency_record latency_record[LT_SAVECOUNT];
#endif

	u64 timer_slack_ns;
	u64 default_timer_slack_ns;

#ifdef CONFIG_KASAN
	unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_ret_stack;

	struct ftrace_ret_stack	*ret_stack;

	unsigned long long ftrace_timestamp;

	atomic_t trace_overrun;

	atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
	unsigned long trace;

	unsigned long trace_recursion;
#endif
#ifdef CONFIG_KCOV
	enum kcov_mode kcov_mode;

	unsigned kcov_size;

	void *kcov_area;

	struct kcov *kcov;
#endif
#ifdef CONFIG_MEMCG
	struct mem_cgroup *memcg_in_oom;
	gfp_t memcg_oom_gfp_mask;
	int memcg_oom_order;

	unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
	struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
	unsigned int	sequential_io;
	unsigned int	sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
	unsigned long	task_state_change;
#endif
	int pagefault_disabled;
#ifdef CONFIG_MMU
	struct task_struct *oom_reaper_list;
#endif

	struct thread_struct thread;
};

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)

#define TNF_MIGRATED	0x01
#define TNF_NO_GROUP	0x02
#define TNF_SHARED	0x04
#define TNF_FAULT_LOCAL	0x08
#define TNF_MIGRATE_FAIL 0x10

#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
					int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
				   int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
	return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
{
	return true;
}
#endif

static inline struct pid *task_pid(struct task_struct *task)
{
	return task->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PID].pid;
}

static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_PGID].pid;
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->group_leader->pids[PIDTYPE_SID].pid;
}

struct pid_namespace;

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
			struct pid_namespace *ns);

static inline pid_t task_pid_nr(struct task_struct *tsk)
{
	return tsk->pid;
}

static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}

static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
	return tsk->tgid;
}

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
	return pid_vnr(task_tgid(tsk));
}

static inline int pid_alive(const struct task_struct *p);
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t pid = 0;

	rcu_read_lock();
	if (pid_alive(tsk))
		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
	rcu_read_unlock();

	return pid;
}

static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
	return task_ppid_nr_ns(tsk, &init_pid_ns);
}

static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
					struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
	return task_pgrp_nr_ns(tsk, &init_pid_ns);
}

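/*
 * pid_alive - check that a task structure is not stale
 * @p: task structure to be checked.
 *
 * Tests if a process is not yet dead (at most zombie state): after
 * release_task() the struct pid pointers are detached and other pointers
 * within the task may be stale. The caller must hold either tasklist_lock
 * or the RCU read lock to keep the result stable.
 */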
static inline int pid_alive(const struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

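/*
 * is_global_init - check if a task structure is init. Since init is free
 * to have sub-threads, we need to check tgid: any thread whose thread
 * group id is 1 in the initial pid namespace counts.
 */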
static inline int is_global_init(struct task_struct *tsk)
{
	return task_tgid_nr(tsk) == 1;
}

extern struct pid *cad_pid;

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
			 cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
				cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
				cputime_t *utime, cputime_t *stime)
{
	if (utime)
		*utime = t->utime;
	if (stime)
		*stime = t->stime;
}

static inline void task_cputime_scaled(struct task_struct *t,
				       cputime_t *utimescaled,
				       cputime_t *stimescaled)
{
	if (utimescaled)
		*utimescaled = t->utimescaled;
	if (stimescaled)
		*stimescaled = t->stimescaled;
}

static inline cputime_t task_gtime(struct task_struct *t)
{
	return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);

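/*
 * Per-process flags, kept in task->flags. These updates are not atomic:
 * only the task itself may set or clear its own flags (other tasks may
 * read them). Flags that need atomic updates live in ->atomic_flags and
 * are manipulated through the TASK_PFA_* helpers further down.
 */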
#define PF_EXITING	0x00000004
#define PF_EXITPIDONE	0x00000008
#define PF_VCPU		0x00000010
#define PF_WQ_WORKER	0x00000020
#define PF_FORKNOEXEC	0x00000040
#define PF_MCE_PROCESS	0x00000080
#define PF_SUPERPRIV	0x00000100
#define PF_DUMPCORE	0x00000200
#define PF_SIGNALED	0x00000400
#define PF_MEMALLOC	0x00000800
#define PF_NPROC_EXCEEDED 0x00001000
#define PF_USED_MATH	0x00002000
#define PF_USED_ASYNC	0x00004000
#define PF_NOFREEZE	0x00008000
#define PF_FROZEN	0x00010000
#define PF_FSTRANS	0x00020000
#define PF_KSWAPD	0x00040000
#define PF_MEMALLOC_NOIO 0x00080000
#define PF_LESS_THROTTLE 0x00100000
#define PF_KTHREAD	0x00200000
#define PF_RANDOMIZE	0x00400000
#define PF_SWAPWRITE	0x00800000
#define PF_NO_SETAFFINITY 0x04000000
#define PF_MCE_EARLY	0x08000000
#define PF_MUTEX_TESTER	0x20000000
#define PF_FREEZER_SKIP	0x40000000
#define PF_SUSPEND_TASK	0x80000000

#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
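
/*
 * memalloc_noio_save()/memalloc_noio_restore() nest correctly because the
 * saved value records whether PF_MEMALLOC_NOIO was already set. A typical
 * use, bracketing a section whose allocations must not recurse into I/O
 * (the variable name is illustrative):
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *	...			// allocations here have __GFP_IO and
 *				// __GFP_FS stripped by memalloc_noio_flags()
 *	memalloc_noio_restore(noio_flags);
 */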

#define PFA_NO_NEW_PRIVS 0
#define PFA_SPREAD_PAGE  1
#define PFA_SPREAD_SLAB  2

#define TASK_PFA_TEST(name, func)					\
	static inline bool task_##func(struct task_struct *p)		\
	{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func)					\
	static inline void task_set_##func(struct task_struct *p)	\
	{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func)					\
	static inline void task_clear_##func(struct task_struct *p)	\
	{ clear_bit(PFA_##name, &p->atomic_flags); }

TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

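/*
 * task->jobctl flags: the low word holds the signal number that initiated
 * a group stop; the bits above it track job-control state used by the
 * signal delivery and ptrace code.
 */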
#define JOBCTL_STOP_SIGMASK	0xffff

#define JOBCTL_STOP_DEQUEUED_BIT 16
#define JOBCTL_STOP_PENDING_BIT	17
#define JOBCTL_STOP_CONSUME_BIT	18
#define JOBCTL_TRAP_STOP_BIT	19
#define JOBCTL_TRAP_NOTIFY_BIT	20
#define JOBCTL_TRAPPING_BIT	21
#define JOBCTL_LISTENING_BIT	22

#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME	(1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP	(1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY	(1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)

extern bool task_set_jobctl_pending(struct task_struct *task,
				    unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
				      unsigned long mask);

static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
	p->rcu_read_lock_nesting = 0;
	p->rcu_read_unlock_special.s = 0;
	p->rcu_blocked_node = NULL;
	INIT_LIST_HEAD(&p->rcu_node_entry);
#endif
#ifdef CONFIG_TASKS_RCU
	p->rcu_tasks_holdout = false;
	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
	p->rcu_tasks_idle_cpu = -1;
#endif
}

static inline void tsk_restore_flags(struct task_struct *task,
				unsigned long orig_flags, unsigned long flags)
{
	task->flags &= ~flags;
	task->flags |= orig_flags & flags;
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
				     const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
			   const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
				const struct cpumask *new_mask);

extern int set_cpus_allowed_ptr(struct task_struct *p,
				const struct cpumask *new_mask);
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
				       const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p,
				       const struct cpumask *new_mask)
{
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif

#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif

extern unsigned long long notrace sched_clock(void);

extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);

extern void sched_clock_init(void);

#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
static inline void sched_clock_tick(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);

extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

extern unsigned long long
task_sched_runtime(struct task_struct *task);

#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif

#ifdef CONFIG_NO_HZ_FULL
extern u64 scheduler_tick_max_deferment(void);
#endif

#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif

extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);

static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
			      const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
				      const struct sched_param *);
extern int sched_setattr(struct task_struct *,
			 const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

static inline bool is_idle_task(const struct task_struct *p)
{
	return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);

void yield(void);

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

extern struct pid_namespace init_pid_ns;

extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
		struct pid_namespace *ns);

extern struct user_struct *alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern void xtime_update(unsigned long ticks);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

extern void release_task(struct task_struct *p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

2549static inline void restore_saved_sigmask(void)
2550{
2551 if (test_and_clear_restore_sigmask())
2552 __set_current_blocked(¤t->saved_sigmask);
2553}
2554
2555static inline sigset_t *sigmask_to_save(void)
2556{
2557 sigset_t *res = ¤t->blocked;
2558 if (unlikely(test_restore_sigmask()))
2559 res = ¤t->saved_sigmask;
2560 return res;
2561}
2562
2563static inline int kill_cad_pid(int sig, int priv)
2564{
2565 return kill_pid(cad_pid, sig, priv);
2566}
2567
2568
2569#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2570#define SEND_SIG_PRIV ((struct siginfo *) 1)
2571#define SEND_SIG_FORCED ((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
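
/*
 * Usage sketch (illustrative): take a stable reference on another
 * task's address space before touching it, and drop it with mmput().
 * Note the two counters: mm_users (taken by get_task_mm(), dropped by
 * mmput()) keeps the address space alive, while mm_count (dropped by
 * mmdrop()) pins only the struct mm_struct itself:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		...operate on mm...
 *		mmput(mm);
 *	}
 */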

/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the passed mode succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
			struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
			struct task_struct *);

/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
		unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);

extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);

extern void do_group_exit(int);

extern int do_execve(struct filename *,
		     const char __user * const __user *,
		     const char __user * const __user *);
extern int do_execveat(int, struct filename *,
		       const char __user * const __user *,
		       const char __user * const __user *,
		       int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
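
/*
 * Usage sketch (illustrative): the comm buffer is TASK_COMM_LEN bytes
 * (defined earlier in this header), and get_task_comm() takes
 * task_lock() internally, so the copy is consistent against a
 * concurrent set_task_comm():
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("%s\n", comm);
 */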

#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
					       long match_state)
{
	return 1;
}
#endif

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
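
/*
 * Usage sketch (illustrative): the task lists are RCU protected, so a
 * walker that does not hold tasklist_lock must run under
 * rcu_read_lock():
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		pr_info("%s[%d]\n", t->comm, task_pid_nr(t));
 *	rcu_read_unlock();
 */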

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/*
 * Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pid namespace
 * out of order we are in big trouble.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.  Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
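
/*
 * Usage sketch (illustrative): lock_task_sighand() returns NULL once
 * the task's signal handling structure has been detached, so the
 * result must be tested before relying on ->sighand:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...task->sighand is stable here...
 *		unlock_task_sighand(task, &flags);
 *	}
 */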

/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing them - are wrapped by
 * threadgroup_change_begin().  This is to provide a place which subsystems
 * needing threadgroup stability can hook into.
 *
 * Note that the mere presence of this function doesn't guarantee a stable
 * threadgroup - it only provides a hook point for subsystems which need it.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
	cgroup_threadgroup_change_begin(tsk);
}

/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
	cgroup_threadgroup_change_end(tsk);
}

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	((struct thread_info *)(task)->stack)
#define task_stack_page(task)	((task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

static inline int object_is_on_stack(void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over the STACK_END_MAGIC canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
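
/*
 * Usage sketch (illustrative, only built with CONFIG_DEBUG_STACK_USAGE):
 *
 *	pr_debug("%s: %lu stack bytes never used\n",
 *		 p->comm, stack_not_used(p));
 */
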
extern void set_task_stack_end_magic(struct task_struct *tsk);

/*
 * Set thread flags in another task's structures.
 * See asm/thread_info.h for available TIF_xxxx flags.
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

/*
 * Should a pending signal abort a sleep of the given state?  Only
 * interruptible sleeps care at all, and TASK_WAKEKILL sleeps are
 * broken only by fatal signals.
 */
static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
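
/*
 * Usage sketch (illustrative; "condition" is a hypothetical wakeup
 * predicate): the classic interruptible wait loop these predicates
 * support.  signal_pending_state() is the scheduler-side check of the
 * same rule: schedule() uses it to refuse blocking a task that
 * already has a signal it should handle:
 *
 *	int ret = 0;
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */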

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe.  The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * reacquire the lock and return 1 if a reschedule happened.
 */
extern int _cond_resched(void);

#define cond_resched() ({			\
	___might_sleep(__FILE__, __LINE__, 0);	\
	_cond_resched();			\
})

extern int __cond_resched_lock(spinlock_t *lock);

#define cond_resched_lock(lock) ({				\
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock);				\
})

extern int __cond_resched_softirq(void);

#define cond_resched_softirq() ({					\
	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
	__cond_resched_softirq();					\
})

static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
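
/*
 * Usage sketch (illustrative; process_item() is a hypothetical
 * helper): a long-running loop in process context should offer to
 * reschedule periodically to keep latencies down:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */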

/*
 * Does a critical section need to be broken due to another
 * task waiting?  (Technically this does not depend on CONFIG_PREEMPT,
 * but it expresses a general need for low latency.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
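
/*
 * Usage sketch (illustrative; more_work()/do_one_item() are
 * hypothetical helpers): cond_resched_lock() breaks up a long
 * spinlocked section when the lock is contended or a reschedule is
 * due, returning nonzero if the lock was dropped and reacquired:
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_item();
 *		if (cond_resched_lock(&lock)) {
 *			// lock was dropped; revalidate what it protected
 *		}
 *	}
 *	spin_unlock(&lock);
 */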

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif

static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED, and the IPI handler, scheduler_ipi(), will also
	 * fold it.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}

/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);

/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);

#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */

extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.rchar += amt;
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
	tsk->ioac.wchar += amt;
}

static inline void inc_syscr(struct task_struct *tsk)
{
	tsk->ioac.syscr++;
}

static inline void inc_syscw(struct task_struct *tsk)
{
	tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}

static inline void inc_syscr(struct task_struct *tsk)
{
}

static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif

#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk)	TASK_SIZE
#endif

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

static inline unsigned long task_rlimit(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
		unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
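
/*
 * Usage sketch (illustrative; nr_open_files is a hypothetical count):
 * enforcing the calling task's soft limit:
 *
 *	if (nr_open_files >= rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */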

#ifdef CONFIG_CPU_FREQ
struct update_util_data {
	void (*func)(struct update_util_data *data,
		     u64 time, unsigned long util, unsigned long max);
};

void cpufreq_set_update_util_data(int cpu, struct update_util_data *data);
#endif /* CONFIG_CPU_FREQ */
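
/*
 * Usage sketch (illustrative; my_update_hook/my_data are hypothetical
 * names): a cpufreq governor registers a per-CPU callback that the
 * scheduler invokes from its utilization-update paths; the hook runs
 * with the runqueue lock held and must not sleep.  Passing NULL
 * unregisters the callback:
 *
 *	static void my_update_hook(struct update_util_data *data,
 *				   u64 time, unsigned long util,
 *				   unsigned long max)
 *	{
 *		...kick frequency selection, no sleeping...
 *	}
 *
 *	static struct update_util_data my_data = { .func = my_update_hook };
 *
 *	cpufreq_set_update_util_data(cpu, &my_data);
 */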

#endif